2024-11-16 06:24:31,086 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad
2024-11-16 06:24:31,102 main DEBUG Took 0.013136 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-16 06:24:31,103 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-16 06:24:31,103 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-16 06:24:31,106 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-16 06:24:31,109 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-16 06:24:31,124 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-16 06:24:31,150 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-16 06:24:31,152 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-16 06:24:31,152 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-16 06:24:31,153 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-16 06:24:31,153 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-16 06:24:31,154 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-16 06:24:31,155 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-16 06:24:31,156 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-16 06:24:31,157 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-16 06:24:31,157 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-16 06:24:31,158 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-16 06:24:31,159 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-16 06:24:31,160 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-16 06:24:31,160 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-16 06:24:31,161 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-16 06:24:31,162 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-16 06:24:31,162 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-16 06:24:31,163 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-16 06:24:31,164 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-16 06:24:31,164 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-16 06:24:31,165 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-16 06:24:31,167 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-16 06:24:31,168 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-16 06:24:31,169 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-16 06:24:31,169 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-16 06:24:31,170 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-16 06:24:31,173 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-16 06:24:31,175 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-16 06:24:31,178 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-16 06:24:31,179 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-16 06:24:31,180 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-16 06:24:31,181 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-16 06:24:31,194 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-16 06:24:31,198 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-16 06:24:31,200 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-16 06:24:31,201 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-16 06:24:31,201 main DEBUG createAppenders(={Console})
2024-11-16 06:24:31,203 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad initialized
2024-11-16 06:24:31,203 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad
2024-11-16 06:24:31,204 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad OK.
2024-11-16 06:24:31,204 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-16 06:24:31,205 main DEBUG OutputStream closed
2024-11-16 06:24:31,205 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-16 06:24:31,206 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-16 06:24:31,206 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@7c711375 OK
2024-11-16 06:24:31,380 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-16 06:24:31,383 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-16 06:24:31,385 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-16 06:24:31,386 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-16 06:24:31,387 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-16 06:24:31,387 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-16 06:24:31,387 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-16 06:24:31,388 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-16 06:24:31,388 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-16 06:24:31,389 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-16 06:24:31,389 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-16 06:24:31,390 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-16 06:24:31,390 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-16 06:24:31,391 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-16 06:24:31,391 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-16 06:24:31,391 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-16 06:24:31,392 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-16 06:24:31,393 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-16 06:24:31,398 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-16 06:24:31,398 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@554e218) with optional ClassLoader: null
2024-11-16 06:24:31,399 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-16 06:24:31,400 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@554e218] started OK.
2024-11-16T06:24:31,420 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins
2024-11-16 06:24:31,423 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-16 06:24:31,424 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-16T06:24:31,960 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1
2024-11-16T06:24:31,961 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobExportSnapshot timeout: 13 mins
2024-11-16T06:24:31,963 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobSecureExportSnapshot timeout: 13 mins
2024-11-16T06:24:32,081 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-16T06:24:32,358 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-16T06:24:32,379 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae, deleteOnExit=true
2024-11-16T06:24:32,379 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-16T06:24:32,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/test.cache.data in system properties and HBase conf
2024-11-16T06:24:32,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop.tmp.dir in system properties and HBase conf
2024-11-16T06:24:32,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop.log.dir in system properties and HBase conf
2024-11-16T06:24:32,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-16T06:24:32,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-16T06:24:32,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-16T06:24:32,499 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-16T06:24:32,508 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-16T06:24:32,509 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-16T06:24:32,510 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-16T06:24:32,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-16T06:24:32,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-16T06:24:32,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-16T06:24:32,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-16T06:24:32,521 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-16T06:24:32,522 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-16T06:24:32,523 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/nfs.dump.dir in system properties and HBase conf
2024-11-16T06:24:32,524 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/java.io.tmpdir in system properties and HBase conf
2024-11-16T06:24:32,524 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-16T06:24:32,525 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-16T06:24:32,526 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-16T06:24:33,784 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-11-16T06:24:33,952 INFO [Time-limited test {}] log.Log(170): Logging initialized @3992ms to org.eclipse.jetty.util.log.Slf4jLog
2024-11-16T06:24:34,101 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-16T06:24:34,235 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-16T06:24:34,278 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-16T06:24:34,279 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-16T06:24:34,282 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-16T06:24:34,312 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-16T06:24:34,317 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5140b357{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop.log.dir/,AVAILABLE}
2024-11-16T06:24:34,319 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cebb95a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-16T06:24:34,638 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5d3f6b4f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/java.io.tmpdir/jetty-localhost-40621-hadoop-hdfs-3_4_1-tests_jar-_-any-4282977197698226475/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-16T06:24:34,650 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@58f01c16{HTTP/1.1, (http/1.1)}{localhost:40621}
2024-11-16T06:24:34,654 INFO [Time-limited test {}] server.Server(415): Started @4691ms
2024-11-16T06:24:35,143 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-16T06:24:35,153 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-16T06:24:35,154 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-16T06:24:35,155 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-16T06:24:35,155 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-16T06:24:35,156 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37223f11{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop.log.dir/,AVAILABLE}
2024-11-16T06:24:35,157 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@516ed17d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-16T06:24:35,264 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@e8ba092{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/java.io.tmpdir/jetty-localhost-44725-hadoop-hdfs-3_4_1-tests_jar-_-any-11913625810057216444/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-16T06:24:35,265 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@605d5872{HTTP/1.1, (http/1.1)}{localhost:44725}
2024-11-16T06:24:35,265 INFO [Time-limited test {}] server.Server(415): Started @5306ms
2024-11-16T06:24:35,330 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-16T06:24:35,567 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-16T06:24:35,580 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-16T06:24:35,633 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-16T06:24:35,634 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-16T06:24:35,634 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-16T06:24:35,650 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1800a749{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop.log.dir/,AVAILABLE}
2024-11-16T06:24:35,651 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4109d9bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-16T06:24:35,810 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5c1dd7bf{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/java.io.tmpdir/jetty-localhost-34009-hadoop-hdfs-3_4_1-tests_jar-_-any-7735398086666813656/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-16T06:24:35,811 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@51a04cd6{HTTP/1.1, (http/1.1)}{localhost:34009}
2024-11-16T06:24:35,812 INFO [Time-limited test {}] server.Server(415): Started @5853ms
2024-11-16T06:24:35,815 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-16T06:24:35,950 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-16T06:24:35,958 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-16T06:24:35,973 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-16T06:24:35,974 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-16T06:24:35,974 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-16T06:24:35,985 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27b64e3f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop.log.dir/,AVAILABLE}
2024-11-16T06:24:35,987 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5b54b674{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-16T06:24:36,045 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data4/current/BP-2134272360-172.17.0.2-1731738273472/current, will proceed with Du for space computation calculation,
2024-11-16T06:24:36,045 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data1/current/BP-2134272360-172.17.0.2-1731738273472/current, will proceed with Du for space computation calculation,
2024-11-16T06:24:36,047 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data3/current/BP-2134272360-172.17.0.2-1731738273472/current, will proceed with Du for space computation calculation,
2024-11-16T06:24:36,054 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data2/current/BP-2134272360-172.17.0.2-1731738273472/current, will proceed with Du for space computation calculation,
2024-11-16T06:24:36,133 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5cfd34d2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/java.io.tmpdir/jetty-localhost-34783-hadoop-hdfs-3_4_1-tests_jar-_-any-10568449843801376560/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-16T06:24:36,134 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@198d5352{HTTP/1.1, (http/1.1)}{localhost:34783}
2024-11-16T06:24:36,134 INFO [Time-limited test {}] server.Server(415): Started @6175ms
2024-11-16T06:24:36,140 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-16T06:24:36,161 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-16T06:24:36,162 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-16T06:24:36,288 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc14dfb48d152c403 with lease ID 0x9f491e19bf9dfdb3: Processing first storage report for DS-f00f7888-c79b-4a2a-b682-4301f96bc86d from datanode DatanodeRegistration(127.0.0.1:36185, datanodeUuid=a232a18a-a31a-4c4d-92dc-644c05f14d99, infoPort=46719, infoSecurePort=0, ipcPort=41707, storageInfo=lv=-57;cid=testClusterID;nsid=1779054455;c=1731738273472)
2024-11-16T06:24:36,290 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc14dfb48d152c403 with lease ID 0x9f491e19bf9dfdb3: from storage DS-f00f7888-c79b-4a2a-b682-4301f96bc86d node DatanodeRegistration(127.0.0.1:36185, datanodeUuid=a232a18a-a31a-4c4d-92dc-644c05f14d99, infoPort=46719, infoSecurePort=0, ipcPort=41707, storageInfo=lv=-57;cid=testClusterID;nsid=1779054455;c=1731738273472), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0
2024-11-16T06:24:36,290 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb550ef2b86885c55 with lease ID 0x9f491e19bf9dfdb2: Processing first storage report for DS-3defb6be-028a-4c36-bf76-0bb3d6ff9764 from datanode DatanodeRegistration(127.0.0.1:40907, datanodeUuid=45c81a13-af13-4abd-be13-e699e7afb52a, infoPort=43849, infoSecurePort=0, ipcPort=38399, storageInfo=lv=-57;cid=testClusterID;nsid=1779054455;c=1731738273472)
2024-11-16T06:24:36,291 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb550ef2b86885c55 with lease ID 0x9f491e19bf9dfdb2: from storage DS-3defb6be-028a-4c36-bf76-0bb3d6ff9764 node DatanodeRegistration(127.0.0.1:40907, datanodeUuid=45c81a13-af13-4abd-be13-e699e7afb52a, infoPort=43849, infoSecurePort=0, ipcPort=38399, storageInfo=lv=-57;cid=testClusterID;nsid=1779054455;c=1731738273472), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-16T06:24:36,291 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc14dfb48d152c403 with lease ID 0x9f491e19bf9dfdb3: Processing first storage report for DS-a1a1faa2-cd49-4733-a8d3-45e547d079f7 from datanode DatanodeRegistration(127.0.0.1:36185, datanodeUuid=a232a18a-a31a-4c4d-92dc-644c05f14d99, infoPort=46719, infoSecurePort=0, ipcPort=41707, storageInfo=lv=-57;cid=testClusterID;nsid=1779054455;c=1731738273472)
2024-11-16T06:24:36,292 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc14dfb48d152c403 with lease ID 0x9f491e19bf9dfdb3: from storage DS-a1a1faa2-cd49-4733-a8d3-45e547d079f7 node DatanodeRegistration(127.0.0.1:36185, datanodeUuid=a232a18a-a31a-4c4d-92dc-644c05f14d99, infoPort=46719, infoSecurePort=0, ipcPort=41707, storageInfo=lv=-57;cid=testClusterID;nsid=1779054455;c=1731738273472), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-16T06:24:36,293 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb550ef2b86885c55 with lease ID 0x9f491e19bf9dfdb2: Processing first storage report for DS-1366ec57-3fb0-417e-9921-b80c144970eb from datanode DatanodeRegistration(127.0.0.1:40907, datanodeUuid=45c81a13-af13-4abd-be13-e699e7afb52a, infoPort=43849, infoSecurePort=0, ipcPort=38399, storageInfo=lv=-57;cid=testClusterID;nsid=1779054455;c=1731738273472)
2024-11-16T06:24:36,293 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb550ef2b86885c55 with lease ID 0x9f491e19bf9dfdb2: from storage DS-1366ec57-3fb0-417e-9921-b80c144970eb node DatanodeRegistration(127.0.0.1:40907, datanodeUuid=45c81a13-af13-4abd-be13-e699e7afb52a, infoPort=43849, infoSecurePort=0, ipcPort=38399, storageInfo=lv=-57;cid=testClusterID;nsid=1779054455;c=1731738273472), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-16T06:24:36,358 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data5/current/BP-2134272360-172.17.0.2-1731738273472/current, will proceed with Du for space computation calculation,
2024-11-16T06:24:36,358 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data6/current/BP-2134272360-172.17.0.2-1731738273472/current, will proceed with Du for space computation calculation,
2024-11-16T06:24:36,416 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-16T06:24:36,426 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcf08616769a5e9a2 with lease ID 0x9f491e19bf9dfdb4: Processing first storage report for DS-1ebc5369-9ac9-4d7a-92d4-567a577c8101 from datanode DatanodeRegistration(127.0.0.1:33197, datanodeUuid=5d296949-a934-44b1-8205-e41aa36a0acd, infoPort=44369, infoSecurePort=0, ipcPort=43747, storageInfo=lv=-57;cid=testClusterID;nsid=1779054455;c=1731738273472)
2024-11-16T06:24:36,426 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcf08616769a5e9a2 with lease ID 0x9f491e19bf9dfdb4: from storage DS-1ebc5369-9ac9-4d7a-92d4-567a577c8101 node DatanodeRegistration(127.0.0.1:33197, datanodeUuid=5d296949-a934-44b1-8205-e41aa36a0acd, infoPort=44369, infoSecurePort=0, ipcPort=43747, storageInfo=lv=-57;cid=testClusterID;nsid=1779054455;c=1731738273472), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-16T06:24:36,427 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcf08616769a5e9a2 with lease ID 0x9f491e19bf9dfdb4: Processing first storage report for DS-deda7312-5f15-48a6-bd61-c3570dfc6785 from datanode DatanodeRegistration(127.0.0.1:33197, datanodeUuid=5d296949-a934-44b1-8205-e41aa36a0acd, infoPort=44369, infoSecurePort=0, ipcPort=43747, storageInfo=lv=-57;cid=testClusterID;nsid=1779054455;c=1731738273472)
2024-11-16T06:24:36,427 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcf08616769a5e9a2 with lease ID 0x9f491e19bf9dfdb4: from storage DS-deda7312-5f15-48a6-bd61-c3570dfc6785 node DatanodeRegistration(127.0.0.1:33197, datanodeUuid=5d296949-a934-44b1-8205-e41aa36a0acd, infoPort=44369, infoSecurePort=0, ipcPort=43747, storageInfo=lv=-57;cid=testClusterID;nsid=1779054455;c=1731738273472), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-16T06:24:36,741 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1
2024-11-16T06:24:36,958 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/zookeeper_0, clientPort=57949, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-16T06:24:36,972 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57949
2024-11-16T06:24:36,988 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-16T06:24:36,993 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-16T06:24:37,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741825_1001 (size=7)
2024-11-16T06:24:37,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741825_1001 (size=7)
2024-11-16T06:24:37,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741825_1001 (size=7)
2024-11-16T06:24:37,630 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 with version=8
2024-11-16T06:24:37,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/hbase-staging
2024-11-16T06:24:37,767 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-16T06:24:38,013 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3456ee6a3164:0 server-side Connection retries=45
2024-11-16T06:24:38,028 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-16T06:24:38,028 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-16T06:24:38,035 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-16T06:24:38,035 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-16T06:24:38,036 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-16T06:24:38,209 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-11-16T06:24:38,281 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-11-16T06:24:38,293 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-11-16T06:24:38,297 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-16T06:24:38,330 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 105729 (auto-detected)
2024-11-16T06:24:38,331 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-11-16T06:24:38,351 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39381
2024-11-16T06:24:38,375 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39381 connecting to ZooKeeper ensemble=127.0.0.1:57949
2024-11-16T06:24:38,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:393810x0, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-16T06:24:38,415 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39381-0x100473470b00000 connected
2024-11-16T06:24:38,458 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-16T06:24:38,461 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-16T06:24:38,471 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-16T06:24:38,476 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7, hbase.cluster.distributed=false
2024-11-16T06:24:38,528 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-16T06:24:38,534 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39381
2024-11-16T06:24:38,541 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39381
2024-11-16T06:24:38,542 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39381
2024-11-16T06:24:38,545 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39381
2024-11-16T06:24:38,549 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39381
2024-11-16T06:24:38,731 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3456ee6a3164:0 server-side Connection retries=45
2024-11-16T06:24:38,733 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-16T06:24:38,734 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-16T06:24:38,734 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-16T06:24:38,735 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-16T06:24:38,735 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-16T06:24:38,740 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-16T06:24:38,745 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-16T06:24:38,765 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35939
2024-11-16T06:24:38,770 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35939 connecting to ZooKeeper ensemble=127.0.0.1:57949
2024-11-16T06:24:38,771 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-16T06:24:38,778 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-16T06:24:38,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:359390x0, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-16T06:24:38,808 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:359390x0, quorum=127.0.0.1:57949, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-16T06:24:38,813 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35939-0x100473470b00001 connected
2024-11-16T06:24:38,814 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-16T06:24:38,827 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled
2024-11-16T06:24:38,831 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-16T06:24:38,838 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-16T06:24:38,840 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35939
2024-11-16T06:24:38,842 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35939
2024-11-16T06:24:38,845 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35939
2024-11-16T06:24:38,848 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35939
2024-11-16T06:24:38,849 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35939
2024-11-16T06:24:38,872 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3456ee6a3164:0 server-side Connection retries=45
2024-11-16T06:24:38,872 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-16T06:24:38,872 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-16T06:24:38,873 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-16T06:24:38,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-16T06:24:38,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-16T06:24:38,873 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-16T06:24:38,874 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-16T06:24:38,875 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40155
2024-11-16T06:24:38,878 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40155 connecting to ZooKeeper ensemble=127.0.0.1:57949
2024-11-16T06:24:38,879 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-16T06:24:38,884 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-16T06:24:38,895 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:401550x0, quorum=127.0.0.1:57949, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-16T06:24:38,895 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-16T06:24:38,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:401550x0, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-16T06:24:38,901 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40155-0x100473470b00002 connected
2024-11-16T06:24:38,902 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled
2024-11-16T06:24:38,904 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-16T06:24:38,907 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-16T06:24:38,910 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40155
2024-11-16T06:24:38,912 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40155
2024-11-16T06:24:38,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40155
2024-11-16T06:24:38,915 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40155
2024-11-16T06:24:38,917 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40155
2024-11-16T06:24:38,940 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3456ee6a3164:0 server-side Connection retries=45
2024-11-16T06:24:38,940 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-16T06:24:38,940 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-16T06:24:38,940 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-16T06:24:38,941 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-16T06:24:38,941 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-16T06:24:38,941 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-16T06:24:38,941 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-16T06:24:38,949 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37533
2024-11-16T06:24:38,952 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37533 connecting to ZooKeeper ensemble=127.0.0.1:57949
2024-11-16T06:24:38,953 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-16T06:24:38,956 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-16T06:24:38,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:375330x0, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-16T06:24:38,966 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:375330x0, quorum=127.0.0.1:57949, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-16T06:24:38,966 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-16T06:24:38,969 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37533-0x100473470b00003 connected 2024-11-16T06:24:38,973 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-11-16T06:24:38,975 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T06:24:38,978 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T06:24:38,979 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37533 2024-11-16T06:24:38,979 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37533 2024-11-16T06:24:38,985 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37533 2024-11-16T06:24:38,986 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37533 2024-11-16T06:24:38,986 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37533 2024-11-16T06:24:39,006 DEBUG [M:0;3456ee6a3164:39381 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3456ee6a3164:39381 2024-11-16T06:24:39,007 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3456ee6a3164,39381,1731738277818 2024-11-16T06:24:39,013 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T06:24:39,013 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T06:24:39,013 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T06:24:39,013 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T06:24:39,018 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3456ee6a3164,39381,1731738277818 2024-11-16T06:24:39,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T06:24:39,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, 
quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:24:39,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:24:39,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T06:24:39,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T06:24:39,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:24:39,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:24:39,047 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T06:24:39,048 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3456ee6a3164,39381,1731738277818 from backup master directory 2024-11-16T06:24:39,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T06:24:39,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3456ee6a3164,39381,1731738277818 2024-11-16T06:24:39,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T06:24:39,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T06:24:39,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T06:24:39,053 WARN [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-16T06:24:39,053 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3456ee6a3164,39381,1731738277818 2024-11-16T06:24:39,056 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-16T06:24:39,057 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-16T06:24:39,128 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/hbase.id] with ID: e571bdc5-ba58-4d7b-b324-bb9f590ba66f 2024-11-16T06:24:39,128 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.tmp/hbase.id 2024-11-16T06:24:39,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741826_1002 (size=42) 2024-11-16T06:24:39,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741826_1002 (size=42) 2024-11-16T06:24:39,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741826_1002 (size=42) 2024-11-16T06:24:39,161 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.tmp/hbase.id]:[hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/hbase.id] 2024-11-16T06:24:39,221 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T06:24:39,227 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T06:24:39,247 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 
2024-11-16T06:24:39,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:24:39,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:24:39,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:24:39,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:24:39,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741827_1003 (size=196) 2024-11-16T06:24:39,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741827_1003 (size=196) 2024-11-16T06:24:39,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741827_1003 (size=196) 2024-11-16T06:24:39,336 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T06:24:39,339 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T06:24:39,355 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: 
org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T06:24:39,360 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-16T06:24:39,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741828_1004 (size=1189) 2024-11-16T06:24:39,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741828_1004 (size=1189) 2024-11-16T06:24:39,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741828_1004 (size=1189) 2024-11-16T06:24:39,416 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/MasterData/data/master/store 2024-11-16T06:24:39,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741829_1005 (size=34) 2024-11-16T06:24:39,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741829_1005 (size=34) 2024-11-16T06:24:39,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741829_1005 (size=34) 2024-11-16T06:24:39,447 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-11-16T06:24:39,450 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:24:39,451 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T06:24:39,451 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T06:24:39,452 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T06:24:39,453 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T06:24:39,453 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T06:24:39,453 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T06:24:39,454 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731738279451Disabling compacts and flushes for region at 1731738279451Disabling writes for close at 1731738279453 (+2 ms)Writing region close event to WAL at 1731738279453Closed at 1731738279453 2024-11-16T06:24:39,457 WARN [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/MasterData/data/master/store/.initializing 2024-11-16T06:24:39,457 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/MasterData/WALs/3456ee6a3164,39381,1731738277818 2024-11-16T06:24:39,465 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-16T06:24:39,491 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C39381%2C1731738277818, suffix=, logDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/MasterData/WALs/3456ee6a3164,39381,1731738277818, archiveDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/MasterData/oldWALs, maxLogs=10 2024-11-16T06:24:39,516 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/MasterData/WALs/3456ee6a3164,39381,1731738277818/3456ee6a3164%2C39381%2C1731738277818.1731738279496, exclude list is [], retry=0 2024-11-16T06:24:39,537 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36185,DS-f00f7888-c79b-4a2a-b682-4301f96bc86d,DISK] 
2024-11-16T06:24:39,537 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40907,DS-3defb6be-028a-4c36-bf76-0bb3d6ff9764,DISK] 2024-11-16T06:24:39,537 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33197,DS-1ebc5369-9ac9-4d7a-92d4-567a577c8101,DISK] 2024-11-16T06:24:39,540 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-16T06:24:39,587 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/MasterData/WALs/3456ee6a3164,39381,1731738277818/3456ee6a3164%2C39381%2C1731738277818.1731738279496 2024-11-16T06:24:39,593 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43849:43849),(127.0.0.1/127.0.0.1:46719:46719),(127.0.0.1/127.0.0.1:44369:44369)] 2024-11-16T06:24:39,594 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T06:24:39,594 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:24:39,600 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T06:24:39,602 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T06:24:39,648 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T06:24:39,682 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T06:24:39,689 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:24:39,692 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T06:24:39,692 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T06:24:39,696 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T06:24:39,697 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:24:39,698 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:24:39,698 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T06:24:39,702 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T06:24:39,703 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:24:39,704 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 
{}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:24:39,705 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T06:24:39,709 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T06:24:39,709 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:24:39,711 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:24:39,711 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T06:24:39,715 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T06:24:39,717 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T06:24:39,726 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T06:24:39,726 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T06:24:39,730 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-16T06:24:39,734 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T06:24:39,739 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:24:39,741 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72449494, jitterRate=0.07958158850669861}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T06:24:39,751 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731738279620Initializing all the Stores at 1731738279622 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731738279623 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738279624 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738279624Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738279624Cleaning up temporary data from old regions at 1731738279726 (+102 ms)Region opened successfully at 1731738279751 (+25 ms) 2024-11-16T06:24:39,753 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T06:24:39,795 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d8d73d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3456ee6a3164/172.17.0.2:0 2024-11-16T06:24:39,830 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-11-16T06:24:39,845 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T06:24:39,845 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T06:24:39,850 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T06:24:39,853 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 2 msec 2024-11-16T06:24:39,860 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 7 msec 2024-11-16T06:24:39,860 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T06:24:39,923 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T06:24:39,936 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T06:24:39,945 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T06:24:39,949 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T06:24:39,951 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T06:24:39,954 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T06:24:39,958 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T06:24:39,964 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T06:24:39,967 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T06:24:39,969 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T06:24:39,981 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T06:24:40,016 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T06:24:40,020 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T06:24:40,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T06:24:40,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T06:24:40,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T06:24:40,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T06:24:40,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:24:40,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:24:40,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:24:40,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:24:40,030 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3456ee6a3164,39381,1731738277818, sessionid=0x100473470b00000, setting cluster-up flag (Was=false) 2024-11-16T06:24:40,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:24:40,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:24:40,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:24:40,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-16T06:24:40,054 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T06:24:40,056 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3456ee6a3164,39381,1731738277818 2024-11-16T06:24:40,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:24:40,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:24:40,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:24:40,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:24:40,068 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T06:24:40,070 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3456ee6a3164,39381,1731738277818 2024-11-16T06:24:40,079 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T06:24:40,097 INFO [RS:0;3456ee6a3164:35939 {}] regionserver.HRegionServer(746): ClusterId : e571bdc5-ba58-4d7b-b324-bb9f590ba66f 2024-11-16T06:24:40,100 INFO [RS:1;3456ee6a3164:40155 {}] regionserver.HRegionServer(746): ClusterId : e571bdc5-ba58-4d7b-b324-bb9f590ba66f 2024-11-16T06:24:40,101 DEBUG [RS:0;3456ee6a3164:35939 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T06:24:40,101 DEBUG [RS:1;3456ee6a3164:40155 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T06:24:40,102 INFO [RS:2;3456ee6a3164:37533 {}] regionserver.HRegionServer(746): ClusterId : e571bdc5-ba58-4d7b-b324-bb9f590ba66f 2024-11-16T06:24:40,102 DEBUG [RS:2;3456ee6a3164:37533 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T06:24:40,107 DEBUG [RS:1;3456ee6a3164:40155 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T06:24:40,107 DEBUG [RS:0;3456ee6a3164:35939 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T06:24:40,107 DEBUG [RS:1;3456ee6a3164:40155 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T06:24:40,107 DEBUG [RS:0;3456ee6a3164:35939 {}] 
procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T06:24:40,109 DEBUG [RS:2;3456ee6a3164:37533 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T06:24:40,109 DEBUG [RS:2;3456ee6a3164:37533 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T06:24:40,114 DEBUG [RS:1;3456ee6a3164:40155 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T06:24:40,115 DEBUG [RS:1;3456ee6a3164:40155 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ffe0d9e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3456ee6a3164/172.17.0.2:0 2024-11-16T06:24:40,117 DEBUG [RS:0;3456ee6a3164:35939 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T06:24:40,117 DEBUG [RS:2;3456ee6a3164:37533 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T06:24:40,118 DEBUG [RS:0;3456ee6a3164:35939 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2bbc76f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3456ee6a3164/172.17.0.2:0 2024-11-16T06:24:40,118 DEBUG [RS:2;3456ee6a3164:37533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c03f15b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3456ee6a3164/172.17.0.2:0 2024-11-16T06:24:40,130 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-11-16T06:24:40,150 DEBUG [RS:1;3456ee6a3164:40155 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;3456ee6a3164:40155 2024-11-16T06:24:40,157 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-16T06:24:40,158 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 2024-11-16T06:24:40,162 INFO [RS:1;3456ee6a3164:40155 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T06:24:40,162 INFO [RS:1;3456ee6a3164:40155 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T06:24:40,163 DEBUG [RS:1;3456ee6a3164:40155 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-16T06:24:40,163 INFO [RS:1;3456ee6a3164:40155 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-16T06:24:40,164 DEBUG [RS:1;3456ee6a3164:40155 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-16T06:24:40,169 DEBUG [RS:0;3456ee6a3164:35939 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3456ee6a3164:35939 2024-11-16T06:24:40,169 INFO [RS:0;3456ee6a3164:35939 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T06:24:40,169 INFO [RS:0;3456ee6a3164:35939 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T06:24:40,169 DEBUG [RS:0;3456ee6a3164:35939 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-16T06:24:40,169 INFO [RS:1;3456ee6a3164:40155 {}] regionserver.HRegionServer(2659): reportForDuty to master=3456ee6a3164,39381,1731738277818 with port=40155, startcode=1731738278871 2024-11-16T06:24:40,170 DEBUG [RS:2;3456ee6a3164:37533 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;3456ee6a3164:37533 2024-11-16T06:24:40,171 INFO [RS:2;3456ee6a3164:37533 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T06:24:40,171 INFO [RS:2;3456ee6a3164:37533 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T06:24:40,171 DEBUG [RS:2;3456ee6a3164:37533 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-16T06:24:40,176 INFO [RS:2;3456ee6a3164:37533 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-16T06:24:40,176 DEBUG [RS:2;3456ee6a3164:37533 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-16T06:24:40,177 INFO [RS:0;3456ee6a3164:35939 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-16T06:24:40,178 DEBUG [RS:0;3456ee6a3164:35939 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-16T06:24:40,179 INFO [RS:0;3456ee6a3164:35939 {}] regionserver.HRegionServer(2659): reportForDuty to master=3456ee6a3164,39381,1731738277818 with port=35939, startcode=1731738278639 2024-11-16T06:24:40,182 INFO [RS:2;3456ee6a3164:37533 {}] regionserver.HRegionServer(2659): reportForDuty to master=3456ee6a3164,39381,1731738277818 with port=37533, startcode=1731738278939 2024-11-16T06:24:40,185 DEBUG [RS:1;3456ee6a3164:40155 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T06:24:40,185 DEBUG [RS:0;3456ee6a3164:35939 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T06:24:40,185 DEBUG [RS:2;3456ee6a3164:37533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T06:24:40,255 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37539, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T06:24:40,256 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40927, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T06:24:40,255 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T06:24:40,257 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53473, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T06:24:40,263 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39381 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-16T06:24:40,269 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39381 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-16T06:24:40,270 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T06:24:40,270 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39381 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-16T06:24:40,283 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-16T06:24:40,291 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3456ee6a3164,39381,1731738277818 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T06:24:40,302 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T06:24:40,302 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T06:24:40,302 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T06:24:40,302 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T06:24:40,302 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3456ee6a3164:0, corePoolSize=10, maxPoolSize=10 2024-11-16T06:24:40,303 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,303 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3456ee6a3164:0, corePoolSize=2, maxPoolSize=2 2024-11-16T06:24:40,303 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,305 DEBUG [RS:1;3456ee6a3164:40155 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-16T06:24:40,305 DEBUG [RS:0;3456ee6a3164:35939 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-16T06:24:40,305 DEBUG [RS:2;3456ee6a3164:37533 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-16T06:24:40,305 WARN [RS:1;3456ee6a3164:40155 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-16T06:24:40,305 WARN [RS:2;3456ee6a3164:37533 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-16T06:24:40,305 WARN [RS:0;3456ee6a3164:35939 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
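The region servers above fail reportForDuty with "Master is not running yet" and back off for 100 ms before retrying. Below is a minimal, generic sketch of that retry-with-sleep shape; it is illustrative only, not HBase's actual HRegionServer code, and the Master interface here is hypothetical.

// Illustrative only: the retry loop shape of reportForDuty, not HBase's implementation.
public class ReportForDutySketch {
  /** Hypothetical stand-in for the master-side registration call. */
  interface Master {
    boolean reportForDuty() throws Exception;
  }

  static void register(Master master) throws InterruptedException {
    final long sleepMs = 100; // the log shows a 100 ms pause between attempts
    while (true) {
      try {
        if (master.reportForDuty()) {
          return; // registration accepted
        }
      } catch (Exception e) {
        // e.g. ServerNotRunningYetException while the master is still initializing
      }
      Thread.sleep(sleepMs);
    }
  }

  public static void main(String[] args) throws InterruptedException {
    final int[] attempts = {0};
    register(() -> ++attempts[0] >= 3); // succeeds on the third attempt
    System.out.println("registered after " + attempts[0] + " attempts");
  }
}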
2024-11-16T06:24:40,322 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T06:24:40,322 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T06:24:40,325 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731738310325 2024-11-16T06:24:40,327 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T06:24:40,329 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T06:24:40,329 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:24:40,330 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T06:24:40,332 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T06:24:40,333 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T06:24:40,333 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T06:24:40,333 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T06:24:40,342 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore 
name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,353 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T06:24:40,355 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T06:24:40,356 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T06:24:40,359 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T06:24:40,361 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T06:24:40,366 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.large.0-1731738280363,5,FailOnTimeoutGroup] 2024-11-16T06:24:40,367 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.small.0-1731738280366,5,FailOnTimeoutGroup] 2024-11-16T06:24:40,367 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,367 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T06:24:40,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741831_1007 (size=1321) 2024-11-16T06:24:40,368 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,369 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
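The hbase:meta descriptor written above lists per-family attributes such as VERSIONS => '3', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1' and BLOCKSIZE => '8192'. A minimal sketch of building a descriptor with the same family attributes through the public client API follows, using a hypothetical table name "demo"; this is not the InitMetaProcedure bootstrap code itself.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static void main(String[] args) {
    // Column family mirroring the 'info' attributes printed above.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBlocksize(8192)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .build();

    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(info)
        .build();

    System.out.println(td);
  }
}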
2024-11-16T06:24:40,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741831_1007 (size=1321) 2024-11-16T06:24:40,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741831_1007 (size=1321) 2024-11-16T06:24:40,375 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T06:24:40,375 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:24:40,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741832_1008 (size=32) 2024-11-16T06:24:40,407 INFO [RS:0;3456ee6a3164:35939 {}] regionserver.HRegionServer(2659): reportForDuty to master=3456ee6a3164,39381,1731738277818 with port=35939, startcode=1731738278639 2024-11-16T06:24:40,407 INFO [RS:1;3456ee6a3164:40155 {}] regionserver.HRegionServer(2659): reportForDuty to master=3456ee6a3164,39381,1731738277818 with port=40155, startcode=1731738278871 2024-11-16T06:24:40,408 INFO [RS:2;3456ee6a3164:37533 {}] regionserver.HRegionServer(2659): reportForDuty to master=3456ee6a3164,39381,1731738277818 with port=37533, startcode=1731738278939 2024-11-16T06:24:40,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741832_1008 (size=32) 2024-11-16T06:24:40,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741832_1008 (size=32) 2024-11-16T06:24:40,410 INFO 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39381 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3456ee6a3164,40155,1731738278871 2024-11-16T06:24:40,411 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:24:40,412 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39381 {}] master.ServerManager(517): Registering regionserver=3456ee6a3164,40155,1731738278871 2024-11-16T06:24:40,423 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39381 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3456ee6a3164,37533,1731738278939 2024-11-16T06:24:40,424 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39381 {}] master.ServerManager(517): Registering regionserver=3456ee6a3164,37533,1731738278939 2024-11-16T06:24:40,426 DEBUG [RS:1;3456ee6a3164:40155 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:24:40,427 DEBUG [RS:1;3456ee6a3164:40155 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36767 2024-11-16T06:24:40,427 DEBUG [RS:1;3456ee6a3164:40155 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T06:24:40,427 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T06:24:40,430 DEBUG [RS:2;3456ee6a3164:37533 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:24:40,431 DEBUG [RS:2;3456ee6a3164:37533 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36767 2024-11-16T06:24:40,431 DEBUG [RS:2;3456ee6a3164:37533 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T06:24:40,431 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39381 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3456ee6a3164,35939,1731738278639 2024-11-16T06:24:40,431 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39381 {}] master.ServerManager(517): Registering regionserver=3456ee6a3164,35939,1731738278639 2024-11-16T06:24:40,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T06:24:40,435 DEBUG [RS:0;3456ee6a3164:35939 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:24:40,435 DEBUG [RS:0;3456ee6a3164:35939 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36767 2024-11-16T06:24:40,436 DEBUG [RS:0;3456ee6a3164:35939 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T06:24:40,436 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T06:24:40,436 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:24:40,437 DEBUG [RS:1;3456ee6a3164:40155 {}] zookeeper.ZKUtil(111): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3456ee6a3164,40155,1731738278871 2024-11-16T06:24:40,438 WARN [RS:1;3456ee6a3164:40155 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T06:24:40,438 INFO [RS:1;3456ee6a3164:40155 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-16T06:24:40,438 DEBUG [RS:1;3456ee6a3164:40155 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/WALs/3456ee6a3164,40155,1731738278871 2024-11-16T06:24:40,438 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T06:24:40,439 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T06:24:40,439 DEBUG [RS:0;3456ee6a3164:35939 {}] zookeeper.ZKUtil(111): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3456ee6a3164,35939,1731738278639 2024-11-16T06:24:40,439 DEBUG [RS:2;3456ee6a3164:37533 {}] zookeeper.ZKUtil(111): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3456ee6a3164,37533,1731738278939 2024-11-16T06:24:40,439 WARN [RS:0;3456ee6a3164:35939 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T06:24:40,439 WARN [RS:2;3456ee6a3164:37533 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
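Each region server above registers itself by setting a watcher on its znode under /hbase/rs, and RegionServerTracker later reacts to the NodeChildrenChanged event on the master side. A minimal stand-alone sketch of creating an ephemeral znode with the plain ZooKeeper client follows; the quorum address is taken from the log, the child name "example-server" is hypothetical, and the parent path is assumed to already exist.

import java.nio.charset.StandardCharsets;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralRsNodeSketch {
  public static void main(String[] args) throws Exception {
    // Connect to the test quorum seen in the log; the no-op watcher ignores events.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:57949", 30000, event -> {});
    // An ephemeral node disappears when the session ends, which is what lets the
    // master notice a crashed region server.
    String path = zk.create("/hbase/rs/example-server",
        "".getBytes(StandardCharsets.UTF_8),
        ZooDefs.Ids.OPEN_ACL_UNSAFE,
        CreateMode.EPHEMERAL);
    System.out.println("created " + path);
    zk.close();
  }
}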
2024-11-16T06:24:40,439 INFO [RS:0;3456ee6a3164:35939 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-16T06:24:40,439 INFO [RS:2;3456ee6a3164:37533 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-16T06:24:40,439 DEBUG [RS:0;3456ee6a3164:35939 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/WALs/3456ee6a3164,35939,1731738278639 2024-11-16T06:24:40,439 DEBUG [RS:2;3456ee6a3164:37533 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/WALs/3456ee6a3164,37533,1731738278939 2024-11-16T06:24:40,442 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T06:24:40,442 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:24:40,446 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T06:24:40,447 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T06:24:40,450 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3456ee6a3164,37533,1731738278939] 2024-11-16T06:24:40,450 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3456ee6a3164,35939,1731738278639] 2024-11-16T06:24:40,450 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3456ee6a3164,40155,1731738278871] 2024-11-16T06:24:40,455 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
1588230740 columnFamilyName rep_barrier 2024-11-16T06:24:40,455 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:24:40,458 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T06:24:40,458 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T06:24:40,461 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T06:24:40,461 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:24:40,463 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T06:24:40,463 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T06:24:40,464 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740 2024-11-16T06:24:40,465 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740 2024-11-16T06:24:40,468 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T06:24:40,468 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T06:24:40,469 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
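The CompactionConfiguration lines above repeat the same store-level settings for every column family: minFilesToCompact=3, maxFilesToCompact=10, ratio 1.2, off-peak ratio 5.0, minCompactSize 128 MB and a throttle point of 2684354560 bytes. A minimal sketch of the corresponding configuration keys follows; the key names are quoted from memory and are assumptions to verify against the reference guide for this release.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed keys for the values CompactionConfiguration prints above.
    conf.setInt("hbase.hstore.compaction.min", 3);           // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);          // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);    // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
    conf.setLong("hbase.regionserver.thread.compaction.throttle", 2684354560L);
    System.out.println(conf.getInt("hbase.hstore.compaction.min", -1));
  }
}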
2024-11-16T06:24:40,471 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T06:24:40,475 INFO [RS:0;3456ee6a3164:35939 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T06:24:40,475 INFO [RS:2;3456ee6a3164:37533 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T06:24:40,475 INFO [RS:1;3456ee6a3164:40155 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T06:24:40,477 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:24:40,478 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72992128, jitterRate=0.08766746520996094}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T06:24:40,482 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731738280411Initializing all the Stores at 1731738280414 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731738280414Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731738280419 (+5 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738280420 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731738280420Cleaning up temporary data from old regions at 1731738280468 (+48 ms)Region opened successfully at 1731738280481 (+13 ms) 2024-11-16T06:24:40,482 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T06:24:40,482 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T06:24:40,482 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T06:24:40,483 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after 
waiting 0 ms 2024-11-16T06:24:40,483 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T06:24:40,484 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T06:24:40,484 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731738280482Disabling compacts and flushes for region at 1731738280482Disabling writes for close at 1731738280483 (+1 ms)Writing region close event to WAL at 1731738280484 (+1 ms)Closed at 1731738280484 2024-11-16T06:24:40,487 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T06:24:40,487 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T06:24:40,495 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T06:24:40,499 INFO [RS:2;3456ee6a3164:37533 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T06:24:40,499 INFO [RS:0;3456ee6a3164:35939 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T06:24:40,499 INFO [RS:1;3456ee6a3164:40155 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T06:24:40,506 INFO [RS:2;3456ee6a3164:37533 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T06:24:40,506 INFO [RS:0;3456ee6a3164:35939 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T06:24:40,506 INFO [RS:2;3456ee6a3164:37533 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,506 INFO [RS:0;3456ee6a3164:35939 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
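The MemStoreFlusher lines above report globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. 95% of the limit. A minimal sketch of the two knobs that produce those numbers follows; both are fractions rather than byte sizes, and the property names are assumptions to check against the running version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed keys: the global limit is a fraction of the region server heap and
    // the lower limit is a fraction of that limit (836 M / 880 M = 0.95 above).
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    System.out.println(conf.getFloat("hbase.regionserver.global.memstore.size", -1f));
  }
}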
2024-11-16T06:24:40,507 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T06:24:40,508 INFO [RS:2;3456ee6a3164:37533 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T06:24:40,508 INFO [RS:0;3456ee6a3164:35939 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T06:24:40,511 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T06:24:40,513 INFO [RS:1;3456ee6a3164:40155 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T06:24:40,513 INFO [RS:1;3456ee6a3164:40155 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,515 INFO [RS:2;3456ee6a3164:37533 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T06:24:40,515 INFO [RS:0;3456ee6a3164:35939 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T06:24:40,517 INFO [RS:1;3456ee6a3164:40155 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T06:24:40,517 INFO [RS:0;3456ee6a3164:35939 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,517 INFO [RS:2;3456ee6a3164:37533 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-16T06:24:40,518 DEBUG [RS:0;3456ee6a3164:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,518 DEBUG [RS:2;3456ee6a3164:37533 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,518 DEBUG [RS:0;3456ee6a3164:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,518 DEBUG [RS:2;3456ee6a3164:37533 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,518 DEBUG [RS:0;3456ee6a3164:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,518 DEBUG [RS:2;3456ee6a3164:37533 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,518 DEBUG [RS:0;3456ee6a3164:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,518 DEBUG [RS:2;3456ee6a3164:37533 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,518 DEBUG [RS:0;3456ee6a3164:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,518 DEBUG [RS:2;3456ee6a3164:37533 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,518 DEBUG [RS:0;3456ee6a3164:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3456ee6a3164:0, corePoolSize=2, maxPoolSize=2 2024-11-16T06:24:40,518 DEBUG [RS:2;3456ee6a3164:37533 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3456ee6a3164:0, corePoolSize=2, maxPoolSize=2 2024-11-16T06:24:40,519 DEBUG [RS:0;3456ee6a3164:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,519 DEBUG [RS:2;3456ee6a3164:37533 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,519 DEBUG [RS:0;3456ee6a3164:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,519 DEBUG [RS:2;3456ee6a3164:37533 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,519 INFO [RS:1;3456ee6a3164:40155 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T06:24:40,519 DEBUG [RS:0;3456ee6a3164:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,519 DEBUG 
[RS:2;3456ee6a3164:37533 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,519 DEBUG [RS:0;3456ee6a3164:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,519 DEBUG [RS:2;3456ee6a3164:37533 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,519 INFO [RS:1;3456ee6a3164:40155 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,519 DEBUG [RS:0;3456ee6a3164:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,519 DEBUG [RS:2;3456ee6a3164:37533 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,519 DEBUG [RS:1;3456ee6a3164:40155 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,519 DEBUG [RS:0;3456ee6a3164:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,519 DEBUG [RS:1;3456ee6a3164:40155 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,519 DEBUG [RS:2;3456ee6a3164:37533 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,520 DEBUG [RS:0;3456ee6a3164:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0, corePoolSize=3, maxPoolSize=3 2024-11-16T06:24:40,520 DEBUG [RS:1;3456ee6a3164:40155 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,520 DEBUG [RS:2;3456ee6a3164:37533 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0, corePoolSize=3, maxPoolSize=3 2024-11-16T06:24:40,520 DEBUG [RS:0;3456ee6a3164:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0, corePoolSize=3, maxPoolSize=3 2024-11-16T06:24:40,520 DEBUG [RS:1;3456ee6a3164:40155 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,520 DEBUG [RS:2;3456ee6a3164:37533 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0, corePoolSize=3, maxPoolSize=3 2024-11-16T06:24:40,520 DEBUG [RS:1;3456ee6a3164:40155 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,520 DEBUG [RS:1;3456ee6a3164:40155 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3456ee6a3164:0, corePoolSize=2, maxPoolSize=2 
2024-11-16T06:24:40,520 DEBUG [RS:1;3456ee6a3164:40155 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,520 DEBUG [RS:1;3456ee6a3164:40155 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,520 DEBUG [RS:1;3456ee6a3164:40155 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,520 DEBUG [RS:1;3456ee6a3164:40155 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,521 DEBUG [RS:1;3456ee6a3164:40155 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,521 DEBUG [RS:1;3456ee6a3164:40155 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T06:24:40,521 DEBUG [RS:1;3456ee6a3164:40155 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0, corePoolSize=3, maxPoolSize=3 2024-11-16T06:24:40,521 DEBUG [RS:1;3456ee6a3164:40155 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0, corePoolSize=3, maxPoolSize=3 2024-11-16T06:24:40,525 INFO [RS:0;3456ee6a3164:35939 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,525 INFO [RS:0;3456ee6a3164:35939 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,525 INFO [RS:2;3456ee6a3164:37533 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,525 INFO [RS:0;3456ee6a3164:35939 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,525 INFO [RS:2;3456ee6a3164:37533 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,526 INFO [RS:0;3456ee6a3164:35939 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,526 INFO [RS:0;3456ee6a3164:35939 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,526 INFO [RS:2;3456ee6a3164:37533 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,526 INFO [RS:0;3456ee6a3164:35939 {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,35939,1731738278639-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T06:24:40,526 INFO [RS:2;3456ee6a3164:37533 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-16T06:24:40,526 INFO [RS:2;3456ee6a3164:37533 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,526 INFO [RS:2;3456ee6a3164:37533 {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,37533,1731738278939-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T06:24:40,558 INFO [RS:1;3456ee6a3164:40155 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,558 INFO [RS:1;3456ee6a3164:40155 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,558 INFO [RS:1;3456ee6a3164:40155 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,558 INFO [RS:1;3456ee6a3164:40155 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,558 INFO [RS:1;3456ee6a3164:40155 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,558 INFO [RS:1;3456ee6a3164:40155 {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,40155,1731738278871-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T06:24:40,580 INFO [RS:0;3456ee6a3164:35939 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T06:24:40,580 INFO [RS:2;3456ee6a3164:37533 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T06:24:40,583 INFO [RS:0;3456ee6a3164:35939 {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,35939,1731738278639-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,583 INFO [RS:2;3456ee6a3164:37533 {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,37533,1731738278939-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,583 INFO [RS:2;3456ee6a3164:37533 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,583 INFO [RS:0;3456ee6a3164:35939 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,583 INFO [RS:2;3456ee6a3164:37533 {}] regionserver.Replication(171): 3456ee6a3164,37533,1731738278939 started 2024-11-16T06:24:40,583 INFO [RS:0;3456ee6a3164:35939 {}] regionserver.Replication(171): 3456ee6a3164,35939,1731738278639 started 2024-11-16T06:24:40,584 INFO [RS:1;3456ee6a3164:40155 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T06:24:40,584 INFO [RS:1;3456ee6a3164:40155 {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,40155,1731738278871-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,585 INFO [RS:1;3456ee6a3164:40155 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,585 INFO [RS:1;3456ee6a3164:40155 {}] regionserver.Replication(171): 3456ee6a3164,40155,1731738278871 started 2024-11-16T06:24:40,601 INFO [RS:0;3456ee6a3164:35939 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T06:24:40,601 INFO [RS:1;3456ee6a3164:40155 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,602 INFO [RS:0;3456ee6a3164:35939 {}] regionserver.HRegionServer(1482): Serving as 3456ee6a3164,35939,1731738278639, RpcServer on 3456ee6a3164/172.17.0.2:35939, sessionid=0x100473470b00001 2024-11-16T06:24:40,602 INFO [RS:1;3456ee6a3164:40155 {}] regionserver.HRegionServer(1482): Serving as 3456ee6a3164,40155,1731738278871, RpcServer on 3456ee6a3164/172.17.0.2:40155, sessionid=0x100473470b00002 2024-11-16T06:24:40,603 DEBUG [RS:1;3456ee6a3164:40155 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T06:24:40,603 DEBUG [RS:0;3456ee6a3164:35939 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T06:24:40,603 DEBUG [RS:0;3456ee6a3164:35939 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3456ee6a3164,35939,1731738278639 2024-11-16T06:24:40,603 DEBUG [RS:1;3456ee6a3164:40155 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3456ee6a3164,40155,1731738278871 2024-11-16T06:24:40,603 DEBUG [RS:0;3456ee6a3164:35939 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3456ee6a3164,35939,1731738278639' 2024-11-16T06:24:40,603 DEBUG [RS:1;3456ee6a3164:40155 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3456ee6a3164,40155,1731738278871' 2024-11-16T06:24:40,604 DEBUG [RS:0;3456ee6a3164:35939 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T06:24:40,604 DEBUG [RS:1;3456ee6a3164:40155 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T06:24:40,605 DEBUG [RS:1;3456ee6a3164:40155 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T06:24:40,605 DEBUG [RS:0;3456ee6a3164:35939 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T06:24:40,605 DEBUG [RS:1;3456ee6a3164:40155 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T06:24:40,605 DEBUG [RS:0;3456ee6a3164:35939 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T06:24:40,605 DEBUG [RS:1;3456ee6a3164:40155 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T06:24:40,605 DEBUG [RS:0;3456ee6a3164:35939 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T06:24:40,605 DEBUG [RS:1;3456ee6a3164:40155 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3456ee6a3164,40155,1731738278871 2024-11-16T06:24:40,605 DEBUG [RS:0;3456ee6a3164:35939 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3456ee6a3164,35939,1731738278639 2024-11-16T06:24:40,605 DEBUG [RS:1;3456ee6a3164:40155 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3456ee6a3164,40155,1731738278871' 2024-11-16T06:24:40,605 DEBUG [RS:0;3456ee6a3164:35939 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3456ee6a3164,35939,1731738278639' 2024-11-16T06:24:40,605 DEBUG [RS:1;3456ee6a3164:40155 {}] procedure.ZKProcedureMemberRpcs(134): 
Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T06:24:40,605 DEBUG [RS:0;3456ee6a3164:35939 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T06:24:40,606 INFO [RS:2;3456ee6a3164:37533 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:40,606 INFO [RS:2;3456ee6a3164:37533 {}] regionserver.HRegionServer(1482): Serving as 3456ee6a3164,37533,1731738278939, RpcServer on 3456ee6a3164/172.17.0.2:37533, sessionid=0x100473470b00003 2024-11-16T06:24:40,606 DEBUG [RS:1;3456ee6a3164:40155 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T06:24:40,606 DEBUG [RS:2;3456ee6a3164:37533 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T06:24:40,606 DEBUG [RS:0;3456ee6a3164:35939 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T06:24:40,606 DEBUG [RS:2;3456ee6a3164:37533 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3456ee6a3164,37533,1731738278939 2024-11-16T06:24:40,606 DEBUG [RS:2;3456ee6a3164:37533 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3456ee6a3164,37533,1731738278939' 2024-11-16T06:24:40,606 DEBUG [RS:2;3456ee6a3164:37533 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T06:24:40,607 DEBUG [RS:1;3456ee6a3164:40155 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T06:24:40,607 INFO [RS:1;3456ee6a3164:40155 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T06:24:40,607 DEBUG [RS:0;3456ee6a3164:35939 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T06:24:40,607 INFO [RS:1;3456ee6a3164:40155 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T06:24:40,607 INFO [RS:0;3456ee6a3164:35939 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T06:24:40,607 INFO [RS:0;3456ee6a3164:35939 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-16T06:24:40,607 DEBUG [RS:2;3456ee6a3164:37533 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-11-16T06:24:40,608 DEBUG [RS:2;3456ee6a3164:37533 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-11-16T06:24:40,608 DEBUG [RS:2;3456ee6a3164:37533 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-11-16T06:24:40,608 DEBUG [RS:2;3456ee6a3164:37533 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3456ee6a3164,37533,1731738278939
2024-11-16T06:24:40,608 DEBUG [RS:2;3456ee6a3164:37533 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3456ee6a3164,37533,1731738278939'
2024-11-16T06:24:40,608 DEBUG [RS:2;3456ee6a3164:37533 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-11-16T06:24:40,608 DEBUG [RS:2;3456ee6a3164:37533 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-16T06:24:40,609 DEBUG [RS:2;3456ee6a3164:37533 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-16T06:24:40,609 INFO [RS:2;3456ee6a3164:37533 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-16T06:24:40,609 INFO [RS:2;3456ee6a3164:37533 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-16T06:24:40,663 WARN [3456ee6a3164:39381 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions.
2024-11-16T06:24:40,712 INFO [RS:2;3456ee6a3164:37533 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-11-16T06:24:40,712 INFO [RS:1;3456ee6a3164:40155 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-11-16T06:24:40,712 INFO [RS:0;3456ee6a3164:35939 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-11-16T06:24:40,715 INFO [RS:0;3456ee6a3164:35939 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C35939%2C1731738278639, suffix=, logDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/WALs/3456ee6a3164,35939,1731738278639, archiveDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/oldWALs, maxLogs=32
2024-11-16T06:24:40,715 INFO [RS:1;3456ee6a3164:40155 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C40155%2C1731738278871, suffix=, logDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/WALs/3456ee6a3164,40155,1731738278871, archiveDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/oldWALs, maxLogs=32
2024-11-16T06:24:40,715 INFO [RS:2;3456ee6a3164:37533 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C37533%2C1731738278939, suffix=, logDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/WALs/3456ee6a3164,37533,1731738278939, archiveDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/oldWALs, maxLogs=32
2024-11-16T06:24:40,731 DEBUG [RS:0;3456ee6a3164:35939 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/WALs/3456ee6a3164,35939,1731738278639/3456ee6a3164%2C35939%2C1731738278639.1731738280719, exclude list is [], retry=0
2024-11-16T06:24:40,732 DEBUG [RS:2;3456ee6a3164:37533 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/WALs/3456ee6a3164,37533,1731738278939/3456ee6a3164%2C37533%2C1731738278939.1731738280719, exclude list is [], retry=0
2024-11-16T06:24:40,737 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33197,DS-1ebc5369-9ac9-4d7a-92d4-567a577c8101,DISK]
2024-11-16T06:24:40,737 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40907,DS-3defb6be-028a-4c36-bf76-0bb3d6ff9764,DISK]
2024-11-16T06:24:40,737 DEBUG [RS:1;3456ee6a3164:40155 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/WALs/3456ee6a3164,40155,1731738278871/3456ee6a3164%2C40155%2C1731738278871.1731738280719, exclude list is [], retry=0
2024-11-16T06:24:40,737 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40907,DS-3defb6be-028a-4c36-bf76-0bb3d6ff9764,DISK]
2024-11-16T06:24:40,738 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33197,DS-1ebc5369-9ac9-4d7a-92d4-567a577c8101,DISK]
2024-11-16T06:24:40,738 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36185,DS-f00f7888-c79b-4a2a-b682-4301f96bc86d,DISK]
2024-11-16T06:24:40,738 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36185,DS-f00f7888-c79b-4a2a-b682-4301f96bc86d,DISK]
2024-11-16T06:24:40,763 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40907,DS-3defb6be-028a-4c36-bf76-0bb3d6ff9764,DISK]
2024-11-16T06:24:40,763 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36185,DS-f00f7888-c79b-4a2a-b682-4301f96bc86d,DISK]
2024-11-16T06:24:40,763 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33197,DS-1ebc5369-9ac9-4d7a-92d4-567a577c8101,DISK]
2024-11-16T06:24:40,764 INFO [RS:2;3456ee6a3164:37533 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/WALs/3456ee6a3164,37533,1731738278939/3456ee6a3164%2C37533%2C1731738278939.1731738280719
2024-11-16T06:24:40,765 INFO [RS:0;3456ee6a3164:35939 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/WALs/3456ee6a3164,35939,1731738278639/3456ee6a3164%2C35939%2C1731738278639.1731738280719
2024-11-16T06:24:40,766 DEBUG [RS:2;3456ee6a3164:37533 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44369:44369),(127.0.0.1/127.0.0.1:43849:43849),(127.0.0.1/127.0.0.1:46719:46719)]
2024-11-16T06:24:40,767 DEBUG [RS:0;3456ee6a3164:35939 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43849:43849),(127.0.0.1/127.0.0.1:44369:44369),(127.0.0.1/127.0.0.1:46719:46719)]
2024-11-16T06:24:40,768 INFO [RS:1;3456ee6a3164:40155 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/WALs/3456ee6a3164,40155,1731738278871/3456ee6a3164%2C40155%2C1731738278871.1731738280719
2024-11-16T06:24:40,768 DEBUG [RS:1;3456ee6a3164:40155 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44369:44369),(127.0.0.1/127.0.0.1:43849:43849),(127.0.0.1/127.0.0.1:46719:46719)]
2024-11-16T06:24:40,916 DEBUG [3456ee6a3164:39381 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3
2024-11-16T06:24:40,927 DEBUG [3456ee6a3164:39381 {}] balancer.BalancerClusterState(204): Hosts are {3456ee6a3164=0} racks are {/default-rack=0}
2024-11-16T06:24:40,937 DEBUG [3456ee6a3164:39381 {}] balancer.BalancerClusterState(303): server 0 has 0 regions
2024-11-16T06:24:40,937 DEBUG [3456ee6a3164:39381 {}] balancer.BalancerClusterState(303): server 1 has 0 regions
2024-11-16T06:24:40,937 DEBUG [3456ee6a3164:39381 {}] balancer.BalancerClusterState(303): server 2 has 0 regions
2024-11-16T06:24:40,938 DEBUG [3456ee6a3164:39381 {}] balancer.BalancerClusterState(310): server 0 is on host 0
2024-11-16T06:24:40,938 DEBUG [3456ee6a3164:39381 {}] balancer.BalancerClusterState(310): server 1 is on host 0
2024-11-16T06:24:40,938 DEBUG [3456ee6a3164:39381 {}] balancer.BalancerClusterState(310): server 2 is on host 0
2024-11-16T06:24:40,938 INFO [3456ee6a3164:39381 {}] balancer.BalancerClusterState(321): server 0 is on rack 0
2024-11-16T06:24:40,938 INFO [3456ee6a3164:39381 {}] balancer.BalancerClusterState(321): server 1 is on rack 0
2024-11-16T06:24:40,938 INFO [3456ee6a3164:39381 {}] balancer.BalancerClusterState(321): server 2 is on rack 0
2024-11-16T06:24:40,938 DEBUG [3456ee6a3164:39381 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1
2024-11-16T06:24:40,946 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3456ee6a3164,37533,1731738278939
2024-11-16T06:24:40,956 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3456ee6a3164,37533,1731738278939, state=OPENING
2024-11-16T06:24:40,962 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-11-16T06:24:40,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-16T06:24:40,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-16T06:24:40,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-16T06:24:40,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-16T06:24:40,966 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-16T06:24:40,967 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-16T06:24:40,967 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-16T06:24:40,968 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-16T06:24:40,971 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-16T06:24:40,974 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3456ee6a3164,37533,1731738278939}]
2024-11-16T06:24:41,159 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-16T06:24:41,162 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50359, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-16T06:24:41,180 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-11-16T06:24:41,181 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-16T06:24:41,182 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta
2024-11-16T06:24:41,197 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C37533%2C1731738278939.meta, suffix=.meta, logDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/WALs/3456ee6a3164,37533,1731738278939, archiveDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/oldWALs, maxLogs=32
2024-11-16T06:24:41,219 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/WALs/3456ee6a3164,37533,1731738278939/3456ee6a3164%2C37533%2C1731738278939.meta.1731738281199.meta, exclude list is [], retry=0
2024-11-16T06:24:41,225 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40907,DS-3defb6be-028a-4c36-bf76-0bb3d6ff9764,DISK]
2024-11-16T06:24:41,226 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33197,DS-1ebc5369-9ac9-4d7a-92d4-567a577c8101,DISK]
2024-11-16T06:24:41,227 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36185,DS-f00f7888-c79b-4a2a-b682-4301f96bc86d,DISK]
2024-11-16T06:24:41,244 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/WALs/3456ee6a3164,37533,1731738278939/3456ee6a3164%2C37533%2C1731738278939.meta.1731738281199.meta
2024-11-16T06:24:41,245 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43849:43849),(127.0.0.1/127.0.0.1:44369:44369),(127.0.0.1/127.0.0.1:46719:46719)]
2024-11-16T06:24:41,246 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-11-16T06:24:41,248 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService
2024-11-16T06:24:41,249 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-11-16T06:24:41,250 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-11-16T06:24:41,252 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-11-16T06:24:41,254 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-11-16T06:24:41,268 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-11-16T06:24:41,269 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-16T06:24:41,269 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740
2024-11-16T06:24:41,269 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740
2024-11-16T06:24:41,282 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-16T06:24:41,285 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-16T06:24:41,285 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-16T06:24:41,286 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-16T06:24:41,286 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-16T06:24:41,288 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-16T06:24:41,288 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-16T06:24:41,290 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-16T06:24:41,290 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-16T06:24:41,292 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-16T06:24:41,292 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-16T06:24:41,293 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-16T06:24:41,293 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-16T06:24:41,295 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-16T06:24:41,296 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-16T06:24:41,300 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-16T06:24:41,301 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-11-16T06:24:41,304 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740
2024-11-16T06:24:41,309 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740
2024-11-16T06:24:41,316 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-11-16T06:24:41,316 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-11-16T06:24:41,318 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-11-16T06:24:41,333 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-11-16T06:24:41,341 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71413747, jitterRate=0.06414775550365448}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-11-16T06:24:41,341 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740
2024-11-16T06:24:41,349 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731738281270Writing region info on filesystem at 1731738281270Initializing all the Stores at 1731738281275 (+5 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731738281275Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731738281281 (+6 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738281281Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731738281281Cleaning up temporary data from old regions at 1731738281316 (+35 ms)Running coprocessor post-open hooks at 1731738281341 (+25 ms)Region opened successfully at 1731738281348 (+7 ms)
2024-11-16T06:24:41,370 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731738281148
2024-11-16T06:24:41,389 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740
2024-11-16T06:24:41,390 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740
2024-11-16T06:24:41,391 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,37533,1731738278939
2024-11-16T06:24:41,394 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3456ee6a3164,37533,1731738278939, state=OPEN
2024-11-16T06:24:41,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-16T06:24:41,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-16T06:24:41,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-16T06:24:41,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-16T06:24:41,397 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-16T06:24:41,397 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-16T06:24:41,397 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-16T06:24:41,397 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-16T06:24:41,397 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3456ee6a3164,37533,1731738278939
2024-11-16T06:24:41,405 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2
2024-11-16T06:24:41,405 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3456ee6a3164,37533,1731738278939 in 424 msec
2024-11-16T06:24:41,418 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1
2024-11-16T06:24:41,419 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 912 msec
2024-11-16T06:24:41,421 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-16T06:24:41,421 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces
2024-11-16T06:24:41,448 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-16T06:24:41,450 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1]
2024-11-16T06:24:41,477 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-16T06:24:41,480 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33797, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-16T06:24:41,504 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.3310 sec
2024-11-16T06:24:41,504 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731738281504, completionTime=-1
2024-11-16T06:24:41,508 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running
2024-11-16T06:24:41,508 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster...
2024-11-16T06:24:41,563 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3
2024-11-16T06:24:41,563 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731738341563
2024-11-16T06:24:41,563 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731738401563
2024-11-16T06:24:41,564 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 55 msec
2024-11-16T06:24:41,567 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache
2024-11-16T06:24:41,586 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,39381,1731738277818-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-16T06:24:41,587 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,39381,1731738277818-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-16T06:24:41,587 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,39381,1731738277818-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-16T06:24:41,589 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3456ee6a3164:39381, period=300000, unit=MILLISECONDS is enabled.
2024-11-16T06:24:41,593 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-11-16T06:24:41,595 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled.
2024-11-16T06:24:41,598 DEBUG [master/3456ee6a3164:0.Chore.1 {}] janitor.CatalogJanitor(180):
2024-11-16T06:24:41,655 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.601sec
2024-11-16T06:24:41,656 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-11-16T06:24:41,658 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-11-16T06:24:41,659 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-11-16T06:24:41,659 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-11-16T06:24:41,660 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-11-16T06:24:41,661 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,39381,1731738277818-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-16T06:24:41,662 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,39381,1731738277818-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-11-16T06:24:41,737 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69b19ad7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-16T06:24:41,742 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false
2024-11-16T06:24:41,742 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512
2024-11-16T06:24:41,744 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry
2024-11-16T06:24:41,745 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is 3456ee6a3164,39381,1731738277818
2024-11-16T06:24:41,748 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6646d4b7
2024-11-16T06:24:41,748 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id
2024-11-16T06:24:41,749 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-11-16T06:24:41,751 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-11-16T06:24:41,752 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35735, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-11-16T06:24:41,759 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39381 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-11-16T06:24:41,793 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl
2024-11-16T06:24:41,797 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION
2024-11-16T06:24:41,797 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f'
2024-11-16T06:24:41,798 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-16T06:24:41,806 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39381 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4
2024-11-16T06:24:41,806 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-11-16T06:24:41,806 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f"
2024-11-16T06:24:41,807 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@105d4e8e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-16T06:24:41,807 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1]
2024-11-16T06:24:41,808 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-11-16T06:24:41,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-16T06:24:41,816 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-11-16T06:24:41,838 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-16T06:24:41,843 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56202, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService
2024-11-16T06:24:41,846 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e446bb8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-16T06:24:41,847 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-16T06:24:41,857 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1]
2024-11-16T06:24:41,858 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-16T06:24:41,879 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38634, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-16T06:24:41,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3456ee6a3164,39381,1731738277818
2024-11-16T06:24:41,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster...
2024-11-16T06:24:41,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/test.cache.data in system properties and HBase conf
2024-11-16T06:24:41,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop.tmp.dir in system properties and HBase conf
2024-11-16T06:24:41,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop.log.dir in system properties and HBase conf
2024-11-16T06:24:41,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-16T06:24:41,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-16T06:24:41,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-16T06:24:41,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-16T06:24:41,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-16T06:24:41,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-16T06:24:41,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-16T06:24:41,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-16T06:24:41,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-16T06:24:41,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-16T06:24:41,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-16T06:24:41,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-16T06:24:41,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/nfs.dump.dir in system properties and HBase conf
2024-11-16T06:24:41,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/java.io.tmpdir in system properties and HBase conf
2024-11-16T06:24:41,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-16T06:24:41,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-16T06:24:41,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-16T06:24:41,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741837_1013 (size=349)
2024-11-16T06:24:41,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741837_1013 (size=349)
2024-11-16T06:24:41,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741837_1013 (size=349)
2024-11-16T06:24:41,925 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d0bbbb84adc872abb747d3f48a8c9338, NAME => 'hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7
2024-11-16T06:24:41,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-16T06:24:42,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741838_1014 (size=36)
2024-11-16T06:24:42,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741838_1014 (size=36)
2024-11-16T06:24:42,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741838_1014 (size=36)
2024-11-16T06:24:42,034 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-16T06:24:42,035 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing d0bbbb84adc872abb747d3f48a8c9338, disabling compactions & flushes
2024-11-16T06:24:42,035 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338.
2024-11-16T06:24:42,036 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338.
2024-11-16T06:24:42,036 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338. after waiting 0 ms
2024-11-16T06:24:42,036 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338.
2024-11-16T06:24:42,036 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338.
2024-11-16T06:24:42,036 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for d0bbbb84adc872abb747d3f48a8c9338: Waiting for close lock at 1731738282035Disabling compacts and flushes for region at 1731738282035Disabling writes for close at 1731738282036 (+1 ms)Writing region close event to WAL at 1731738282036Closed at 1731738282036
2024-11-16T06:24:42,040 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META
2024-11-16T06:24:42,048 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1731738282041"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731738282041"}]},"ts":"1731738282041"}
2024-11-16T06:24:42,058 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta.
2024-11-16T06:24:42,066 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-11-16T06:24:42,075 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738282066"}]},"ts":"1731738282066"}
2024-11-16T06:24:42,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741839_1015 (size=592039)
2024-11-16T06:24:42,082 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta
2024-11-16T06:24:42,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741839_1015 (size=592039)
2024-11-16T06:24:42,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741839_1015 (size=592039)
2024-11-16T06:24:42,083 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {3456ee6a3164=0} racks are {/default-rack=0}
2024-11-16T06:24:42,093 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions
2024-11-16T06:24:42,093 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions
2024-11-16T06:24:42,093 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions
2024-11-16T06:24:42,093 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0
2024-11-16T06:24:42,093 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0
2024-11-16T06:24:42,093 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0
2024-11-16T06:24:42,093 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0
2024-11-16T06:24:42,093 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0
2024-11-16T06:24:42,093 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0
2024-11-16T06:24:42,093 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1
2024-11-16T06:24:42,097 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=d0bbbb84adc872abb747d3f48a8c9338, ASSIGN}]
2024-11-16T06:24:42,100 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=d0bbbb84adc872abb747d3f48a8c9338, ASSIGN
2024-11-16T06:24:42,107 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=d0bbbb84adc872abb747d3f48a8c9338, ASSIGN; state=OFFLINE, location=3456ee6a3164,35939,1731738278639; forceNewPlan=false, retain=false
2024-11-16T06:24:42,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-16T06:24:42,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741840_1016 (size=1663647)
2024-11-16T06:24:42,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741840_1016 (size=1663647)
2024-11-16T06:24:42,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741840_1016 (size=1663647)
2024-11-16T06:24:42,261 INFO [3456ee6a3164:39381 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment.
2024-11-16T06:24:42,262 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d0bbbb84adc872abb747d3f48a8c9338, regionState=OPENING, regionLocation=3456ee6a3164,35939,1731738278639
2024-11-16T06:24:42,268 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=d0bbbb84adc872abb747d3f48a8c9338, ASSIGN because future has completed
2024-11-16T06:24:42,285 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d0bbbb84adc872abb747d3f48a8c9338, server=3456ee6a3164,35939,1731738278639}]
2024-11-16T06:24:42,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-16T06:24:42,468 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-16T06:24:42,498 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48763, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-16T06:24:42,532 INFO [RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338.
2024-11-16T06:24:42,532 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => d0bbbb84adc872abb747d3f48a8c9338, NAME => 'hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338.', STARTKEY => '', ENDKEY => ''} 2024-11-16T06:24:42,533 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338. service=AccessControlService 2024-11-16T06:24:42,533 INFO [RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-16T06:24:42,534 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl d0bbbb84adc872abb747d3f48a8c9338 2024-11-16T06:24:42,534 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:24:42,534 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for d0bbbb84adc872abb747d3f48a8c9338 2024-11-16T06:24:42,534 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for d0bbbb84adc872abb747d3f48a8c9338 2024-11-16T06:24:42,541 INFO [StoreOpener-d0bbbb84adc872abb747d3f48a8c9338-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region d0bbbb84adc872abb747d3f48a8c9338 2024-11-16T06:24:42,559 INFO [StoreOpener-d0bbbb84adc872abb747d3f48a8c9338-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d0bbbb84adc872abb747d3f48a8c9338 columnFamilyName l 2024-11-16T06:24:42,559 DEBUG [StoreOpener-d0bbbb84adc872abb747d3f48a8c9338-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:24:42,570 INFO [StoreOpener-d0bbbb84adc872abb747d3f48a8c9338-1 {}] regionserver.HStore(327): Store=d0bbbb84adc872abb747d3f48a8c9338/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:24:42,570 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for d0bbbb84adc872abb747d3f48a8c9338 2024-11-16T06:24:42,574 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/acl/d0bbbb84adc872abb747d3f48a8c9338 2024-11-16T06:24:42,575 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/acl/d0bbbb84adc872abb747d3f48a8c9338 2024-11-16T06:24:42,576 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for d0bbbb84adc872abb747d3f48a8c9338 2024-11-16T06:24:42,576 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for d0bbbb84adc872abb747d3f48a8c9338 2024-11-16T06:24:42,582 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for d0bbbb84adc872abb747d3f48a8c9338 2024-11-16T06:24:42,599 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/acl/d0bbbb84adc872abb747d3f48a8c9338/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:24:42,601 INFO [RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened d0bbbb84adc872abb747d3f48a8c9338; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69014841, jitterRate=0.028401270508766174}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:24:42,601 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d0bbbb84adc872abb747d3f48a8c9338 2024-11-16T06:24:42,605 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for d0bbbb84adc872abb747d3f48a8c9338: Running coprocessor pre-open hook at 1731738282535Writing region info on filesystem at 1731738282535Initializing all the Stores at 1731738282537 (+2 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731738282537Cleaning up temporary data from old regions at 1731738282577 (+40 ms)Running coprocessor post-open hooks at 1731738282601 (+24 ms)Region opened successfully at 1731738282605 (+4 ms) 
2024-11-16T06:24:42,609 INFO [RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338., pid=6, masterSystemTime=1731738282467 2024-11-16T06:24:42,614 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338. 2024-11-16T06:24:42,614 INFO [RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338. 2024-11-16T06:24:42,616 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d0bbbb84adc872abb747d3f48a8c9338, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:24:42,624 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d0bbbb84adc872abb747d3f48a8c9338, server=3456ee6a3164,35939,1731738278639 because future has completed 2024-11-16T06:24:42,646 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T06:24:42,646 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure d0bbbb84adc872abb747d3f48a8c9338, server=3456ee6a3164,35939,1731738278639 in 354 msec 2024-11-16T06:24:42,652 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T06:24:42,652 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=d0bbbb84adc872abb747d3f48a8c9338, ASSIGN in 550 msec 2024-11-16T06:24:42,655 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T06:24:42,655 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738282655"}]},"ts":"1731738282655"} 2024-11-16T06:24:42,660 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-11-16T06:24:42,663 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T06:24:42,670 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 900 msec 2024-11-16T06:24:42,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T06:24:42,976 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: hbase:acl completed 2024-11-16T06:24:43,006 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup 
initialization complete, took 0 seconds 2024-11-16T06:24:43,008 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T06:24:43,009 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,39381,1731738277818-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T06:24:43,886 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T06:24:43,990 WARN [Thread-384 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T06:24:44,307 INFO [Thread-384 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T06:24:44,307 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-16T06:24:44,308 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T06:24:44,332 INFO [Thread-384 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T06:24:44,332 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T06:24:44,333 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T06:24:44,333 INFO [Thread-384 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T06:24:44,333 INFO [Thread-384 {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T06:24:44,333 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T06:24:44,333 INFO [Thread-384 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@34f9a9b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop.log.dir/,AVAILABLE} 2024-11-16T06:24:44,334 INFO [Thread-384 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@d2aeec9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-16T06:24:44,340 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T06:24:44,354 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b1c64f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop.log.dir/,AVAILABLE} 2024-11-16T06:24:44,355 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@55b63922{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-16T06:24:44,559 INFO [Thread-384 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class 2024-11-16T06:24:44,559 INFO [Thread-384 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class 2024-11-16T06:24:44,559 INFO [Thread-384 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-16T06:24:44,561 INFO [Thread-384 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-16T06:24:44,622 INFO [Thread-384 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-16T06:24:45,243 INFO [Thread-384 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-16T06:24:45,693 INFO [Thread-384 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-16T06:24:45,723 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4b4553ed{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/java.io.tmpdir/jetty-localhost-41947-hadoop-yarn-common-3_4_1_jar-_-any-2753779632329704600/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-16T06:24:45,723 INFO [Thread-384 {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7000b80c{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/java.io.tmpdir/jetty-localhost-45811-hadoop-yarn-common-3_4_1_jar-_-any-14472503798515222747/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-16T06:24:45,724 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@60579b67{HTTP/1.1, (http/1.1)}{localhost:41947} 2024-11-16T06:24:45,724 INFO [Time-limited test {}] server.Server(415): Started @15765ms 2024-11-16T06:24:45,725 INFO [Thread-384 {}] 
server.AbstractConnector(333): Started ServerConnector@2fcaeaf0{HTTP/1.1, (http/1.1)}{localhost:45811} 2024-11-16T06:24:45,725 INFO [Thread-384 {}] server.Server(415): Started @15766ms 2024-11-16T06:24:45,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741841_1017 (size=5) 2024-11-16T06:24:45,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741841_1017 (size=5) 2024-11-16T06:24:45,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741841_1017 (size=5) 2024-11-16T06:24:46,784 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-16T06:24:46,885 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-11-16T06:24:46,892 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T06:24:46,901 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-11-16T06:24:46,903 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T06:24:46,928 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-16T06:24:46,929 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T06:24:46,932 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T06:24:46,932 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T06:24:46,933 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T06:24:46,934 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T06:24:46,937 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e3a7a80{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop.log.dir/,AVAILABLE} 2024-11-16T06:24:46,938 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1662dfcb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-16T06:24:46,994 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-16T06:24:46,995 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-16T06:24:46,995 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-16T06:24:46,995 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-16T06:24:47,014 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-16T06:24:47,048 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-16T06:24:47,220 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-16T06:24:47,233 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6f7b7652{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/java.io.tmpdir/jetty-localhost-44909-hadoop-yarn-common-3_4_1_jar-_-any-2214860190740937377/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-16T06:24:47,234 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5ee86169{HTTP/1.1, (http/1.1)}{localhost:44909} 2024-11-16T06:24:47,234 INFO [Time-limited test {}] server.Server(415): Started @17275ms 2024-11-16T06:24:47,458 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-11-16T06:24:47,460 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T06:24:47,478 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. 
This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-16T06:24:47,479 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T06:24:47,482 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T06:24:47,482 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T06:24:47,483 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T06:24:47,486 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T06:24:47,488 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5342a4dd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop.log.dir/,AVAILABLE} 2024-11-16T06:24:47,489 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ca5bf15{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-16T06:24:47,574 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-16T06:24:47,575 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-16T06:24:47,575 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-16T06:24:47,575 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-16T06:24:47,607 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-16T06:24:47,623 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-16T06:24:47,771 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-16T06:24:47,776 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@5fe3aeaf{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/java.io.tmpdir/jetty-localhost-36911-hadoop-yarn-common-3_4_1_jar-_-any-6877295386553666063/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-16T06:24:47,777 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7edb01a7{HTTP/1.1, (http/1.1)}{localhost:36911} 2024-11-16T06:24:47,777 INFO [Time-limited test {}] server.Server(415): Started @17818ms 2024-11-16T06:24:47,811 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-11-16T06:24:47,813 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T06:24:47,863 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=718, OpenFileDescriptor=792, MaxFileDescriptor=1048576, SystemLoadAverage=262, ProcessCount=11, AvailableMemoryMB=3984 2024-11-16T06:24:47,865 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=718 is superior to 500 2024-11-16T06:24:47,871 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T06:24:47,881 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 3456ee6a3164,39381,1731738277818 2024-11-16T06:24:47,881 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5f9e6d0a 2024-11-16T06:24:47,882 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T06:24:47,885 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56218, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T06:24:47,887 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T06:24:47,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-11-16T06:24:47,895 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T06:24:47,897 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: 
"default" qualifier: "testtb-testExportWithTargetName" procId is: 7 2024-11-16T06:24:47,899 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T06:24:47,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-16T06:24:47,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741842_1018 (size=442) 2024-11-16T06:24:47,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741842_1018 (size=442) 2024-11-16T06:24:47,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741842_1018 (size=442) 2024-11-16T06:24:47,929 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 29e4933fe7f7918fc359164fcdde2bce, NAME => 'testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:24:47,930 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 791faad6072c5de4c2a3b58f0311f4ed, NAME => 'testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:24:47,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741843_1019 (size=67) 2024-11-16T06:24:47,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741843_1019 (size=67) 2024-11-16T06:24:47,968 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:24:47,968 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741843_1019 (size=67) 2024-11-16T06:24:47,969 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing 791faad6072c5de4c2a3b58f0311f4ed, disabling compactions & flushes 2024-11-16T06:24:47,969 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed. 2024-11-16T06:24:47,969 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed. 2024-11-16T06:24:47,969 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed. after waiting 0 ms 2024-11-16T06:24:47,969 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed. 2024-11-16T06:24:47,969 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed. 2024-11-16T06:24:47,969 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for 791faad6072c5de4c2a3b58f0311f4ed: Waiting for close lock at 1731738287968Disabling compacts and flushes for region at 1731738287968Disabling writes for close at 1731738287969 (+1 ms)Writing region close event to WAL at 1731738287969Closed at 1731738287969 2024-11-16T06:24:47,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741844_1020 (size=67) 2024-11-16T06:24:47,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741844_1020 (size=67) 2024-11-16T06:24:47,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741844_1020 (size=67) 2024-11-16T06:24:47,982 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:24:47,983 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing 29e4933fe7f7918fc359164fcdde2bce, disabling compactions & flushes 2024-11-16T06:24:47,983 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce. 2024-11-16T06:24:47,983 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce. 
2024-11-16T06:24:47,983 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce. after waiting 0 ms 2024-11-16T06:24:47,983 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce. 2024-11-16T06:24:47,983 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce. 2024-11-16T06:24:47,983 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for 29e4933fe7f7918fc359164fcdde2bce: Waiting for close lock at 1731738287983Disabling compacts and flushes for region at 1731738287983Disabling writes for close at 1731738287983Writing region close event to WAL at 1731738287983Closed at 1731738287983 2024-11-16T06:24:47,985 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T06:24:47,986 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1731738287985"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731738287985"}]},"ts":"1731738287985"} 2024-11-16T06:24:47,986 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1731738287985"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731738287985"}]},"ts":"1731738287985"} 2024-11-16T06:24:48,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-16T06:24:48,046 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-11-16T06:24:48,049 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T06:24:48,049 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738288049"}]},"ts":"1731738288049"} 2024-11-16T06:24:48,057 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-11-16T06:24:48,057 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {3456ee6a3164=0} racks are {/default-rack=0} 2024-11-16T06:24:48,070 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-16T06:24:48,070 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-16T06:24:48,070 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-16T06:24:48,071 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-16T06:24:48,071 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-16T06:24:48,071 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-16T06:24:48,071 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-16T06:24:48,071 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-16T06:24:48,071 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-16T06:24:48,071 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-16T06:24:48,071 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=29e4933fe7f7918fc359164fcdde2bce, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=791faad6072c5de4c2a3b58f0311f4ed, ASSIGN}] 2024-11-16T06:24:48,075 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=791faad6072c5de4c2a3b58f0311f4ed, ASSIGN 2024-11-16T06:24:48,075 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=29e4933fe7f7918fc359164fcdde2bce, ASSIGN 2024-11-16T06:24:48,077 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=791faad6072c5de4c2a3b58f0311f4ed, ASSIGN; state=OFFLINE, location=3456ee6a3164,37533,1731738278939; forceNewPlan=false, retain=false 2024-11-16T06:24:48,077 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=29e4933fe7f7918fc359164fcdde2bce, ASSIGN; state=OFFLINE, location=3456ee6a3164,35939,1731738278639; forceNewPlan=false, retain=false 2024-11-16T06:24:48,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-16T06:24:48,228 INFO [3456ee6a3164:39381 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-16T06:24:48,228 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=29e4933fe7f7918fc359164fcdde2bce, regionState=OPENING, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:24:48,228 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=791faad6072c5de4c2a3b58f0311f4ed, regionState=OPENING, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:24:48,233 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=791faad6072c5de4c2a3b58f0311f4ed, ASSIGN because future has completed 2024-11-16T06:24:48,234 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 791faad6072c5de4c2a3b58f0311f4ed, server=3456ee6a3164,37533,1731738278939}] 2024-11-16T06:24:48,235 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=29e4933fe7f7918fc359164fcdde2bce, ASSIGN because future has completed 2024-11-16T06:24:48,236 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 29e4933fe7f7918fc359164fcdde2bce, server=3456ee6a3164,35939,1731738278639}] 2024-11-16T06:24:48,276 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:24:48,276 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-11-16T06:24:48,277 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T06:24:48,277 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-16T06:24:48,278 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-11-16T06:24:48,279 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 
2024-11-16T06:24:48,280 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:24:48,280 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-11-16T06:24:48,280 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-16T06:24:48,280 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-11-16T06:24:48,280 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:24:48,280 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-11-16T06:24:48,281 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T06:24:48,281 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-16T06:24:48,281 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-16T06:24:48,281 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-16T06:24:48,401 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce. 2024-11-16T06:24:48,401 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => 29e4933fe7f7918fc359164fcdde2bce, NAME => 'testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce.', STARTKEY => '', ENDKEY => '1'} 2024-11-16T06:24:48,402 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce. service=AccessControlService 2024-11-16T06:24:48,402 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-16T06:24:48,402 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 29e4933fe7f7918fc359164fcdde2bce 2024-11-16T06:24:48,402 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:24:48,402 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for 29e4933fe7f7918fc359164fcdde2bce 2024-11-16T06:24:48,403 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): checking classloading for 29e4933fe7f7918fc359164fcdde2bce 2024-11-16T06:24:48,406 INFO [StoreOpener-29e4933fe7f7918fc359164fcdde2bce-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 29e4933fe7f7918fc359164fcdde2bce 2024-11-16T06:24:48,416 INFO [StoreOpener-29e4933fe7f7918fc359164fcdde2bce-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 29e4933fe7f7918fc359164fcdde2bce columnFamilyName cf 2024-11-16T06:24:48,420 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed. 2024-11-16T06:24:48,420 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 791faad6072c5de4c2a3b58f0311f4ed, NAME => 'testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed.', STARTKEY => '1', ENDKEY => ''} 2024-11-16T06:24:48,421 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed. service=AccessControlService 2024-11-16T06:24:48,421 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-16T06:24:48,421 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 791faad6072c5de4c2a3b58f0311f4ed 2024-11-16T06:24:48,421 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:24:48,421 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 791faad6072c5de4c2a3b58f0311f4ed 2024-11-16T06:24:48,422 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 791faad6072c5de4c2a3b58f0311f4ed 2024-11-16T06:24:48,424 DEBUG [StoreOpener-29e4933fe7f7918fc359164fcdde2bce-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:24:48,426 INFO [StoreOpener-29e4933fe7f7918fc359164fcdde2bce-1 {}] regionserver.HStore(327): Store=29e4933fe7f7918fc359164fcdde2bce/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:24:48,427 INFO [StoreOpener-791faad6072c5de4c2a3b58f0311f4ed-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 791faad6072c5de4c2a3b58f0311f4ed 2024-11-16T06:24:48,427 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for 29e4933fe7f7918fc359164fcdde2bce 2024-11-16T06:24:48,429 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/29e4933fe7f7918fc359164fcdde2bce 2024-11-16T06:24:48,430 INFO [StoreOpener-791faad6072c5de4c2a3b58f0311f4ed-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 791faad6072c5de4c2a3b58f0311f4ed columnFamilyName cf 2024-11-16T06:24:48,430 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/29e4933fe7f7918fc359164fcdde2bce 2024-11-16T06:24:48,431 DEBUG [StoreOpener-791faad6072c5de4c2a3b58f0311f4ed-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:24:48,431 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for 29e4933fe7f7918fc359164fcdde2bce 2024-11-16T06:24:48,432 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for 29e4933fe7f7918fc359164fcdde2bce 2024-11-16T06:24:48,432 INFO [StoreOpener-791faad6072c5de4c2a3b58f0311f4ed-1 {}] regionserver.HStore(327): Store=791faad6072c5de4c2a3b58f0311f4ed/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:24:48,432 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 791faad6072c5de4c2a3b58f0311f4ed 2024-11-16T06:24:48,434 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/791faad6072c5de4c2a3b58f0311f4ed 2024-11-16T06:24:48,435 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for 29e4933fe7f7918fc359164fcdde2bce 2024-11-16T06:24:48,435 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/791faad6072c5de4c2a3b58f0311f4ed 2024-11-16T06:24:48,436 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 791faad6072c5de4c2a3b58f0311f4ed 2024-11-16T06:24:48,436 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for 791faad6072c5de4c2a3b58f0311f4ed 2024-11-16T06:24:48,439 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for 791faad6072c5de4c2a3b58f0311f4ed 2024-11-16T06:24:48,443 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/29e4933fe7f7918fc359164fcdde2bce/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:24:48,445 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened 29e4933fe7f7918fc359164fcdde2bce; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59560247, jitterRate=-0.11248315870761871}}}, 
FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:24:48,445 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 29e4933fe7f7918fc359164fcdde2bce 2024-11-16T06:24:48,446 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for 29e4933fe7f7918fc359164fcdde2bce: Running coprocessor pre-open hook at 1731738288403Writing region info on filesystem at 1731738288403Initializing all the Stores at 1731738288404 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738288404Cleaning up temporary data from old regions at 1731738288432 (+28 ms)Running coprocessor post-open hooks at 1731738288445 (+13 ms)Region opened successfully at 1731738288446 (+1 ms) 2024-11-16T06:24:48,447 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/791faad6072c5de4c2a3b58f0311f4ed/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:24:48,448 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce., pid=11, masterSystemTime=1731738288393 2024-11-16T06:24:48,448 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened 791faad6072c5de4c2a3b58f0311f4ed; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73375266, jitterRate=0.09337666630744934}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:24:48,448 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 791faad6072c5de4c2a3b58f0311f4ed 2024-11-16T06:24:48,449 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for 791faad6072c5de4c2a3b58f0311f4ed: Running coprocessor pre-open hook at 1731738288422Writing region info on filesystem at 1731738288422Initializing all the Stores at 1731738288425 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738288425Cleaning up temporary data from old regions at 1731738288436 (+11 ms)Running coprocessor post-open hooks at 1731738288448 (+12 ms)Region opened successfully at 1731738288448 2024-11-16T06:24:48,450 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] 
regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed., pid=10, masterSystemTime=1731738288388 2024-11-16T06:24:48,452 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce. 2024-11-16T06:24:48,452 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce. 2024-11-16T06:24:48,454 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=29e4933fe7f7918fc359164fcdde2bce, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:24:48,455 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed. 2024-11-16T06:24:48,455 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed. 2024-11-16T06:24:48,457 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=791faad6072c5de4c2a3b58f0311f4ed, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:24:48,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 29e4933fe7f7918fc359164fcdde2bce, server=3456ee6a3164,35939,1731738278639 because future has completed 2024-11-16T06:24:48,462 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 791faad6072c5de4c2a3b58f0311f4ed, server=3456ee6a3164,37533,1731738278939 because future has completed 2024-11-16T06:24:48,467 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=8 2024-11-16T06:24:48,468 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure 29e4933fe7f7918fc359164fcdde2bce, server=3456ee6a3164,35939,1731738278639 in 225 msec 2024-11-16T06:24:48,472 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-16T06:24:48,472 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=29e4933fe7f7918fc359164fcdde2bce, ASSIGN in 397 msec 2024-11-16T06:24:48,472 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure 791faad6072c5de4c2a3b58f0311f4ed, server=3456ee6a3164,37533,1731738278939 in 231 msec 2024-11-16T06:24:48,478 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-11-16T06:24:48,478 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=791faad6072c5de4c2a3b58f0311f4ed, ASSIGN in 401 msec 2024-11-16T06:24:48,482 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T06:24:48,482 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738288482"}]},"ts":"1731738288482"} 2024-11-16T06:24:48,485 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-11-16T06:24:48,487 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T06:24:48,491 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-11-16T06:24:48,502 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338., hostname=3456ee6a3164,35939,1731738278639, seqNum=2] 2024-11-16T06:24:48,503 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:24:48,505 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36953, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:24:48,509 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:24:48,509 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:24:48,509 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:24:48,511 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41831, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-11-16T06:24:48,514 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338., hostname=3456ee6a3164,35939,1731738278639, seqNum=2] 2024-11-16T06:24:48,515 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:24:48,516 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59393, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), 
service=ClientService 2024-11-16T06:24:48,518 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-16T06:24:48,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-16T06:24:48,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-16T06:24:48,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-16T06:24:48,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-16T06:24:48,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-16T06:24:48,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:24:48,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:24:48,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:24:48,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:24:48,548 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-16T06:24:48,548 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-16T06:24:48,548 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-16T06:24:48,548 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName 
\x00 \x01 \x02 \x03 \x04 2024-11-16T06:24:48,554 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 661 msec 2024-11-16T06:24:49,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-16T06:24:49,036 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-16T06:24:49,040 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-16T06:24:49,048 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-11-16T06:24:49,048 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce. 2024-11-16T06:24:49,050 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T06:24:49,053 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-16T06:24:49,070 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-16T06:24:49,076 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:24:49,080 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44152, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:24:49,086 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-16T06:24:49,100 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-16T06:24:49,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731738289100 (current time:1731738289100). 
2024-11-16T06:24:49,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-16T06:24:49,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-16T06:24:49,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-16T06:24:49,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60441f8e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:24:49,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:24:49,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:24:49,105 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:24:49,105 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:24:49,105 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:24:49,106 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41fd43b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:24:49,106 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:24:49,106 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:24:49,106 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:24:49,107 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59318, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:24:49,109 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b97a937, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:24:49,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:24:49,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:24:49,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:24:49,112 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36836, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:24:49,115 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 2024-11-16T06:24:49,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:24:49,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:24:49,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:24:49,122 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-16T06:24:49,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66794339, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:24:49,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:24:49,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:24:49,125 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:24:49,126 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:24:49,126 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:24:49,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@652fcbc5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:24:49,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:24:49,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:24:49,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:24:49,128 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59336, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:24:49,130 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24fd97f4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:24:49,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:24:49,132 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:24:49,133 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:24:49,135 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36840, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-16T06:24:49,139 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338., hostname=3456ee6a3164,35939,1731738278639, seqNum=2] 2024-11-16T06:24:49,140 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:24:49,141 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44158, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:24:49,144 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 2024-11-16T06:24:49,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:24:49,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:24:49,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:24:49,145 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:24:49,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-16T06:24:49,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-16T06:24:49,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-16T06:24:49,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-16T06:24:49,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-16T06:24:49,158 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-16T06:24:49,163 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-16T06:24:49,176 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-16T06:24:49,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741845_1021 (size=167) 2024-11-16T06:24:49,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741845_1021 (size=167) 2024-11-16T06:24:49,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741845_1021 (size=167) 2024-11-16T06:24:49,188 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName 
type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-16T06:24:49,190 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 29e4933fe7f7918fc359164fcdde2bce}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 791faad6072c5de4c2a3b58f0311f4ed}] 2024-11-16T06:24:49,194 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 29e4933fe7f7918fc359164fcdde2bce 2024-11-16T06:24:49,195 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 791faad6072c5de4c2a3b58f0311f4ed 2024-11-16T06:24:49,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-16T06:24:49,353 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-11-16T06:24:49,353 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-11-16T06:24:49,354 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce. 2024-11-16T06:24:49,355 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed. 2024-11-16T06:24:49,359 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for 29e4933fe7f7918fc359164fcdde2bce: 2024-11-16T06:24:49,359 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 791faad6072c5de4c2a3b58f0311f4ed: 2024-11-16T06:24:49,359 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce. for emptySnaptb0-testExportWithTargetName completed. 2024-11-16T06:24:49,359 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed. for emptySnaptb0-testExportWithTargetName completed. 2024-11-16T06:24:49,360 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-16T06:24:49,360 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-16T06:24:49,364 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:24:49,364 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:24:49,366 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-16T06:24:49,366 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-16T06:24:49,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741846_1022 (size=70) 2024-11-16T06:24:49,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741846_1022 (size=70) 2024-11-16T06:24:49,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741846_1022 (size=70) 2024-11-16T06:24:49,409 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce. 2024-11-16T06:24:49,411 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-16T06:24:49,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741847_1023 (size=70) 2024-11-16T06:24:49,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-11-16T06:24:49,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741847_1023 (size=70) 2024-11-16T06:24:49,415 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 29e4933fe7f7918fc359164fcdde2bce 2024-11-16T06:24:49,415 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 29e4933fe7f7918fc359164fcdde2bce 2024-11-16T06:24:49,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741847_1023 (size=70) 2024-11-16T06:24:49,419 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed. 
2024-11-16T06:24:49,419 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-16T06:24:49,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-16T06:24:49,420 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 791faad6072c5de4c2a3b58f0311f4ed 2024-11-16T06:24:49,421 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 791faad6072c5de4c2a3b58f0311f4ed 2024-11-16T06:24:49,422 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 29e4933fe7f7918fc359164fcdde2bce in 228 msec 2024-11-16T06:24:49,436 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=12 2024-11-16T06:24:49,436 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-16T06:24:49,436 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 791faad6072c5de4c2a3b58f0311f4ed in 232 msec 2024-11-16T06:24:49,441 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-16T06:24:49,444 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-16T06:24:49,444 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-16T06:24:49,445 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:24:49,446 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-16T06:24:49,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741848_1024 (size=62) 2024-11-16T06:24:49,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741848_1024 (size=62) 2024-11-16T06:24:49,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741848_1024 (size=62) 2024-11-16T06:24:49,473 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-16T06:24:49,473 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-11-16T06:24:49,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-16T06:24:49,477 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-11-16T06:24:49,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741849_1025 (size=649) 2024-11-16T06:24:49,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741849_1025 (size=649) 2024-11-16T06:24:49,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741849_1025 (size=649) 2024-11-16T06:24:49,523 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-16T06:24:49,541 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-16T06:24:49,543 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-11-16T06:24:49,547 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-16T06:24:49,547 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-16T06:24:49,557 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 395 msec 2024-11-16T06:24:49,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-16T06:24:49,785 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-11-16T06:24:49,805 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35939 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce. with WAL disabled. Data may be lost in the event of a crash. 2024-11-16T06:24:49,810 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37533 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed. with WAL disabled. Data may be lost in the event of a crash. 2024-11-16T06:24:49,815 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-16T06:24:49,821 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-11-16T06:24:49,821 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce. 
2024-11-16T06:24:49,821 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T06:24:49,824 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-16T06:24:49,835 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-16T06:24:49,849 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-16T06:24:49,855 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-16T06:24:49,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731738289855 (current time:1731738289855). 2024-11-16T06:24:49,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-16T06:24:49,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-16T06:24:49,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-16T06:24:49,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a979b6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:24:49,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:24:49,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:24:49,858 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:24:49,858 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:24:49,859 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:24:49,859 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37a14949, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=null 2024-11-16T06:24:49,859 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:24:49,859 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:24:49,860 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:24:49,861 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59354, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:24:49,863 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d8f151b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:24:49,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:24:49,865 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:24:49,866 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:24:49,868 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36852, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:24:49,872 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 
2024-11-16T06:24:49,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:24:49,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:24:49,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:24:49,873 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:24:49,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@428dc138, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:24:49,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:24:49,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:24:49,875 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:24:49,875 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:24:49,876 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:24:49,876 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c18209, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:24:49,876 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:24:49,877 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:24:49,878 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59374, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:24:49,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:24:49,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@160decc7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:24:49,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:24:49,883 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:24:49,884 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:24:49,886 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36864, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:24:49,891 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338., hostname=3456ee6a3164,35939,1731738278639, seqNum=2] 2024-11-16T06:24:49,891 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:24:49,893 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44162, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:24:49,896 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 
2024-11-16T06:24:49,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:24:49,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:24:49,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:24:49,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-16T06:24:49,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-16T06:24:49,898 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
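The entries below (pid=15) trace the master-side SnapshotProcedure for snaptb0-testExportWithTargetName from SNAPSHOT_PREPARE through SNAPSHOT_COMPLETE_SNAPSHOT. For orientation, a minimal sketch of the client-side request that drives such a procedure, assuming the standard HBase Admin API; the table and snapshot names are copied from the log, everything else is illustrative:

// Sketch only: assumes the standard HBase client API (Admin.snapshot with SnapshotType.FLUSH).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a FLUSH-type snapshot; the master executes it as the
      // SnapshotProcedure whose state transitions are logged below.
      admin.snapshot("snaptb0-testExportWithTargetName",
          TableName.valueOf("testtb-testExportWithTargetName"),
          SnapshotType.FLUSH);
    }
  }
}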
2024-11-16T06:24:49,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-16T06:24:49,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-16T06:24:49,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-16T06:24:49,908 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-16T06:24:49,912 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-16T06:24:49,918 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-16T06:24:49,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741850_1026 (size=162) 2024-11-16T06:24:49,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741850_1026 (size=162) 2024-11-16T06:24:49,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741850_1026 (size=162) 2024-11-16T06:24:49,949 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-16T06:24:49,950 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 29e4933fe7f7918fc359164fcdde2bce}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 791faad6072c5de4c2a3b58f0311f4ed}] 2024-11-16T06:24:49,953 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 791faad6072c5de4c2a3b58f0311f4ed 2024-11-16T06:24:49,953 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 29e4933fe7f7918fc359164fcdde2bce 2024-11-16T06:24:50,006 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-16T06:24:50,107 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-11-16T06:24:50,108 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-11-16T06:24:50,108 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed. 2024-11-16T06:24:50,109 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce. 2024-11-16T06:24:50,113 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing 791faad6072c5de4c2a3b58f0311f4ed 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-16T06:24:50,121 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing 29e4933fe7f7918fc359164fcdde2bce 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-16T06:24:50,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-16T06:24:50,234 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241116d3ee7e4265bb47e893996a35b523a50a_29e4933fe7f7918fc359164fcdde2bce is 71, key is 0a74b4ad0ea590e1ac2010d1a3730499/cf:q/1731738289805/Put/seqid=0 2024-11-16T06:24:50,235 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241116aaed6cfa727b460e8efaeac96ec76478_791faad6072c5de4c2a3b58f0311f4ed is 71, key is 16b25097f8473d4465a06056728db710/cf:q/1731738289810/Put/seqid=0 2024-11-16T06:24:50,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741851_1027 (size=8241) 2024-11-16T06:24:50,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741851_1027 (size=8241) 2024-11-16T06:24:50,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741851_1027 (size=8241) 2024-11-16T06:24:50,264 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:24:50,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741852_1028 (size=5032) 2024-11-16T06:24:50,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741852_1028 (size=5032) 2024-11-16T06:24:50,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741852_1028 (size=5032) 2024-11-16T06:24:50,272 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:24:50,329 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241116d3ee7e4265bb47e893996a35b523a50a_29e4933fe7f7918fc359164fcdde2bce to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241116d3ee7e4265bb47e893996a35b523a50a_29e4933fe7f7918fc359164fcdde2bce 2024-11-16T06:24:50,329 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241116aaed6cfa727b460e8efaeac96ec76478_791faad6072c5de4c2a3b58f0311f4ed to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241116aaed6cfa727b460e8efaeac96ec76478_791faad6072c5de4c2a3b58f0311f4ed 2024-11-16T06:24:50,331 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/29e4933fe7f7918fc359164fcdde2bce/.tmp/cf/c276b5e3853547a1b3a2addcbefbce44, store: [table=testtb-testExportWithTargetName family=cf region=29e4933fe7f7918fc359164fcdde2bce] 2024-11-16T06:24:50,331 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/791faad6072c5de4c2a3b58f0311f4ed/.tmp/cf/9db7dc092be741e5ba4e365efc0b231c, store: [table=testtb-testExportWithTargetName family=cf region=791faad6072c5de4c2a3b58f0311f4ed] 2024-11-16T06:24:50,347 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/29e4933fe7f7918fc359164fcdde2bce/.tmp/cf/c276b5e3853547a1b3a2addcbefbce44 is 208, key is 0f4404dd243ce9df9ccee6c6afa745eff/cf:q/1731738289805/Put/seqid=0 2024-11-16T06:24:50,347 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/791faad6072c5de4c2a3b58f0311f4ed/.tmp/cf/9db7dc092be741e5ba4e365efc0b231c is 208, key is 17eb5f30e4386cdd689c8470e94cf9450/cf:q/1731738289810/Put/seqid=0 2024-11-16T06:24:50,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741853_1029 (size=5706) 2024-11-16T06:24:50,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741854_1030 (size=15153) 2024-11-16T06:24:50,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741854_1030 (size=15153) 2024-11-16T06:24:50,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741853_1029 (size=5706) 2024-11-16T06:24:50,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741854_1030 (size=15153) 2024-11-16T06:24:50,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741853_1029 (size=5706) 2024-11-16T06:24:50,367 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/791faad6072c5de4c2a3b58f0311f4ed/.tmp/cf/9db7dc092be741e5ba4e365efc0b231c 2024-11-16T06:24:50,368 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/29e4933fe7f7918fc359164fcdde2bce/.tmp/cf/c276b5e3853547a1b3a2addcbefbce44 2024-11-16T06:24:50,381 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/791faad6072c5de4c2a3b58f0311f4ed/.tmp/cf/9db7dc092be741e5ba4e365efc0b231c as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/791faad6072c5de4c2a3b58f0311f4ed/cf/9db7dc092be741e5ba4e365efc0b231c 2024-11-16T06:24:50,381 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/29e4933fe7f7918fc359164fcdde2bce/.tmp/cf/c276b5e3853547a1b3a2addcbefbce44 as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/29e4933fe7f7918fc359164fcdde2bce/cf/c276b5e3853547a1b3a2addcbefbce44 2024-11-16T06:24:50,392 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/791faad6072c5de4c2a3b58f0311f4ed/cf/9db7dc092be741e5ba4e365efc0b231c, entries=48, sequenceid=6, filesize=14.8 K 2024-11-16T06:24:50,392 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/29e4933fe7f7918fc359164fcdde2bce/cf/c276b5e3853547a1b3a2addcbefbce44, entries=2, sequenceid=6, filesize=5.6 K 2024-11-16T06:24:50,402 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 791faad6072c5de4c2a3b58f0311f4ed in 290ms, sequenceid=6, compaction requested=false 2024-11-16T06:24:50,402 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 29e4933fe7f7918fc359164fcdde2bce in 287ms, sequenceid=6, compaction requested=false 2024-11-16T06:24:50,402 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-11-16T06:24:50,402 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-11-16T06:24:50,403 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for 791faad6072c5de4c2a3b58f0311f4ed: 2024-11-16T06:24:50,403 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for 29e4933fe7f7918fc359164fcdde2bce: 2024-11-16T06:24:50,403 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed. for snaptb0-testExportWithTargetName completed. 2024-11-16T06:24:50,404 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce. for snaptb0-testExportWithTargetName completed. 
2024-11-16T06:24:50,404 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-16T06:24:50,404 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-16T06:24:50,404 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:24:50,404 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:24:50,404 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/791faad6072c5de4c2a3b58f0311f4ed/cf/9db7dc092be741e5ba4e365efc0b231c] hfiles 2024-11-16T06:24:50,404 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/29e4933fe7f7918fc359164fcdde2bce/cf/c276b5e3853547a1b3a2addcbefbce44] hfiles 2024-11-16T06:24:50,406 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/791faad6072c5de4c2a3b58f0311f4ed/cf/9db7dc092be741e5ba4e365efc0b231c for snapshot=snaptb0-testExportWithTargetName 2024-11-16T06:24:50,406 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/29e4933fe7f7918fc359164fcdde2bce/cf/c276b5e3853547a1b3a2addcbefbce44 for snapshot=snaptb0-testExportWithTargetName 2024-11-16T06:24:50,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741855_1031 (size=109) 2024-11-16T06:24:50,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741855_1031 (size=109) 2024-11-16T06:24:50,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741855_1031 (size=109) 2024-11-16T06:24:50,433 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed. 
2024-11-16T06:24:50,434 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-16T06:24:50,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-11-16T06:24:50,435 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 791faad6072c5de4c2a3b58f0311f4ed 2024-11-16T06:24:50,435 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 791faad6072c5de4c2a3b58f0311f4ed 2024-11-16T06:24:50,440 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 791faad6072c5de4c2a3b58f0311f4ed in 487 msec 2024-11-16T06:24:50,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741856_1032 (size=109) 2024-11-16T06:24:50,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741856_1032 (size=109) 2024-11-16T06:24:50,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741856_1032 (size=109) 2024-11-16T06:24:50,444 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce. 
2024-11-16T06:24:50,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-11-16T06:24:50,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-11-16T06:24:50,446 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 29e4933fe7f7918fc359164fcdde2bce 2024-11-16T06:24:50,446 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 29e4933fe7f7918fc359164fcdde2bce 2024-11-16T06:24:50,451 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=16, resume processing ppid=15 2024-11-16T06:24:50,451 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 29e4933fe7f7918fc359164fcdde2bce in 497 msec 2024-11-16T06:24:50,451 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-16T06:24:50,453 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-16T06:24:50,455 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-16T06:24:50,455 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-16T06:24:50,455 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:24:50,457 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241116aaed6cfa727b460e8efaeac96ec76478_791faad6072c5de4c2a3b58f0311f4ed, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241116d3ee7e4265bb47e893996a35b523a50a_29e4933fe7f7918fc359164fcdde2bce] hfiles 2024-11-16T06:24:50,458 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241116aaed6cfa727b460e8efaeac96ec76478_791faad6072c5de4c2a3b58f0311f4ed 2024-11-16T06:24:50,458 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241116d3ee7e4265bb47e893996a35b523a50a_29e4933fe7f7918fc359164fcdde2bce 2024-11-16T06:24:50,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741857_1033 (size=293) 2024-11-16T06:24:50,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741857_1033 (size=293) 2024-11-16T06:24:50,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741857_1033 (size=293) 2024-11-16T06:24:50,470 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-16T06:24:50,470 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-11-16T06:24:50,471 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-11-16T06:24:50,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741858_1034 (size=959) 2024-11-16T06:24:50,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741858_1034 (size=959) 2024-11-16T06:24:50,495 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741858_1034 (size=959) 2024-11-16T06:24:50,500 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-16T06:24:50,514 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-16T06:24:50,515 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-11-16T06:24:50,518 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-16T06:24:50,518 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-16T06:24:50,522 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 620 msec 2024-11-16T06:24:50,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-16T06:24:50,525 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-11-16T06:24:50,526 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738290525 2024-11-16T06:24:50,526 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:36767, tgtDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738290525, rawTgtDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738290525, srcFsUri=hdfs://localhost:36767, srcDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:24:50,604 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:36767, inputRoot=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:24:50,604 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): 
outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2111949037_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738290525, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738290525/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-16T06:24:50,611 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-16T06:24:50,636 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738290525/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-16T06:24:50,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741859_1035 (size=959) 2024-11-16T06:24:50,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741859_1035 (size=959) 2024-11-16T06:24:50,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741859_1035 (size=959) 2024-11-16T06:24:50,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741860_1036 (size=162) 2024-11-16T06:24:50,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741860_1036 (size=162) 2024-11-16T06:24:50,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741860_1036 (size=162) 2024-11-16T06:24:50,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741861_1037 (size=154) 2024-11-16T06:24:50,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741861_1037 (size=154) 2024-11-16T06:24:50,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741861_1037 (size=154) 2024-11-16T06:24:50,748 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:24:50,749 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:24:50,749 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:24:51,899 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop-15761711039768805681.jar 2024-11-16T06:24:51,900 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:24:51,900 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:24:51,979 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop-8958726024911976316.jar 2024-11-16T06:24:51,980 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:24:51,980 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:24:51,981 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:24:51,981 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:24:51,982 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:24:51,982 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:24:51,983 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-16T06:24:51,984 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-16T06:24:51,984 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-16T06:24:51,985 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-16T06:24:51,985 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-16T06:24:51,985 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-16T06:24:51,986 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-16T06:24:51,986 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-16T06:24:51,987 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-16T06:24:51,987 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-16T06:24:51,988 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-16T06:24:51,990 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:24:51,991 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:24:51,991 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 
2024-11-16T06:24:51,992 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:24:51,992 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:24:51,993 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:24:51,993 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:24:52,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741862_1038 (size=131440) 2024-11-16T06:24:52,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741862_1038 (size=131440) 2024-11-16T06:24:52,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741862_1038 (size=131440) 2024-11-16T06:24:52,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741863_1039 (size=4188619) 2024-11-16T06:24:52,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741863_1039 (size=4188619) 2024-11-16T06:24:52,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741863_1039 (size=4188619) 2024-11-16T06:24:52,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741864_1040 (size=440656) 2024-11-16T06:24:52,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741864_1040 (size=440656) 2024-11-16T06:24:52,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741864_1040 (size=440656) 2024-11-16T06:24:52,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741865_1041 (size=1323991) 2024-11-16T06:24:52,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741865_1041 (size=1323991) 2024-11-16T06:24:52,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741865_1041 (size=1323991) 2024-11-16T06:24:53,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741866_1042 (size=903732) 
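The TableMapReduceUtil entries above stage the HBase and Hadoop dependency jars for the export MapReduce job, and the block reports that follow record those jars being written to HDFS. A minimal sketch of the equivalent ExportSnapshot invocation, assuming the tool's documented options (-snapshot, -target, -copy-to) and that ExportSnapshot can be driven through the Hadoop Tool interface; the snapshot name, target name, and destination path are copied from the log, the rest is illustrative:

// Sketch only: option names and the Tool-based invocation are assumptions based on the
// standard ExportSnapshot tool; values below are taken from the log entries above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // CLI equivalent: hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //   -snapshot snaptb0-testExportWithTargetName -target testExportWithTargetName \
    //   -copy-to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738290525
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithTargetName",
        "-target", "testExportWithTargetName",   // name given to the snapshot at the destination
        "-copy-to", "hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738290525"
    });
    System.exit(rc);
  }
}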
2024-11-16T06:24:53,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741866_1042 (size=903732) 2024-11-16T06:24:53,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741866_1042 (size=903732) 2024-11-16T06:24:53,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741867_1043 (size=8360083) 2024-11-16T06:24:53,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741867_1043 (size=8360083) 2024-11-16T06:24:53,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741867_1043 (size=8360083) 2024-11-16T06:24:53,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741868_1044 (size=1877034) 2024-11-16T06:24:53,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741868_1044 (size=1877034) 2024-11-16T06:24:53,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741868_1044 (size=1877034) 2024-11-16T06:24:53,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741869_1045 (size=77835) 2024-11-16T06:24:53,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741869_1045 (size=77835) 2024-11-16T06:24:53,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741869_1045 (size=77835) 2024-11-16T06:24:53,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741870_1046 (size=30949) 2024-11-16T06:24:53,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741870_1046 (size=30949) 2024-11-16T06:24:53,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741870_1046 (size=30949) 2024-11-16T06:24:53,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741871_1047 (size=1597327) 2024-11-16T06:24:53,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741871_1047 (size=1597327) 2024-11-16T06:24:53,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741871_1047 (size=1597327) 2024-11-16T06:24:53,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741872_1048 (size=4695811) 2024-11-16T06:24:53,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741872_1048 (size=4695811) 2024-11-16T06:24:53,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741872_1048 
(size=4695811) 2024-11-16T06:24:53,907 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-16T06:24:53,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741873_1049 (size=232957) 2024-11-16T06:24:53,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741873_1049 (size=232957) 2024-11-16T06:24:53,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741873_1049 (size=232957) 2024-11-16T06:24:54,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741874_1050 (size=127628) 2024-11-16T06:24:54,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741874_1050 (size=127628) 2024-11-16T06:24:54,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741874_1050 (size=127628) 2024-11-16T06:24:54,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741875_1051 (size=20406) 2024-11-16T06:24:54,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741875_1051 (size=20406) 2024-11-16T06:24:54,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741875_1051 (size=20406) 2024-11-16T06:24:54,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741876_1052 (size=6424738) 2024-11-16T06:24:54,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741876_1052 (size=6424738) 2024-11-16T06:24:54,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741876_1052 (size=6424738) 2024-11-16T06:24:54,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741877_1053 (size=5175431) 2024-11-16T06:24:54,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741877_1053 (size=5175431) 2024-11-16T06:24:54,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741877_1053 (size=5175431) 2024-11-16T06:24:54,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741878_1054 (size=217634) 2024-11-16T06:24:54,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741878_1054 (size=217634) 2024-11-16T06:24:54,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741878_1054 (size=217634) 2024-11-16T06:24:54,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to 
blk_1073741879_1055 (size=1832290) 2024-11-16T06:24:54,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741879_1055 (size=1832290) 2024-11-16T06:24:54,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741879_1055 (size=1832290) 2024-11-16T06:24:54,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741880_1056 (size=322274) 2024-11-16T06:24:54,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741880_1056 (size=322274) 2024-11-16T06:24:54,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741880_1056 (size=322274) 2024-11-16T06:24:55,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741881_1057 (size=503880) 2024-11-16T06:24:55,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741881_1057 (size=503880) 2024-11-16T06:24:55,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741881_1057 (size=503880) 2024-11-16T06:24:55,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741882_1058 (size=29229) 2024-11-16T06:24:55,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741882_1058 (size=29229) 2024-11-16T06:24:55,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741882_1058 (size=29229) 2024-11-16T06:24:55,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741883_1059 (size=24096) 2024-11-16T06:24:55,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741883_1059 (size=24096) 2024-11-16T06:24:55,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741883_1059 (size=24096) 2024-11-16T06:24:55,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741884_1060 (size=111872) 2024-11-16T06:24:55,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741884_1060 (size=111872) 2024-11-16T06:24:55,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741884_1060 (size=111872) 2024-11-16T06:24:55,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741885_1061 (size=45609) 2024-11-16T06:24:55,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741885_1061 (size=45609) 2024-11-16T06:24:55,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to 
blk_1073741885_1061 (size=45609) 2024-11-16T06:24:55,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741886_1062 (size=136454) 2024-11-16T06:24:55,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741886_1062 (size=136454) 2024-11-16T06:24:55,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741886_1062 (size=136454) 2024-11-16T06:24:55,818 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-16T06:24:55,823 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-11-16T06:24:55,830 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=33.3 K 2024-11-16T06:24:55,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741887_1063 (size=722) 2024-11-16T06:24:55,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741887_1063 (size=722) 2024-11-16T06:24:55,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741887_1063 (size=722) 2024-11-16T06:24:55,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741888_1064 (size=15) 2024-11-16T06:24:55,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741888_1064 (size=15) 2024-11-16T06:24:55,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741888_1064 (size=15) 2024-11-16T06:24:56,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741889_1065 (size=303733) 2024-11-16T06:24:56,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741889_1065 (size=303733) 2024-11-16T06:24:56,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741889_1065 (size=303733) 2024-11-16T06:24:56,518 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-16T06:24:56,518 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-16T06:24:56,952 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0001_000001 (auth:SIMPLE) from 127.0.0.1:43570 2024-11-16T06:24:58,275 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-16T06:24:58,276 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-11-16T06:25:05,770 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0001_000001 (auth:SIMPLE) from 127.0.0.1:54596 2024-11-16T06:25:06,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741890_1066 (size=349383) 2024-11-16T06:25:06,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741890_1066 (size=349383) 2024-11-16T06:25:06,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741890_1066 (size=349383) 2024-11-16T06:25:06,745 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T06:25:08,115 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0001_000001 (auth:SIMPLE) from 127.0.0.1:37722 2024-11-16T06:25:16,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741891_1067 (size=15153) 2024-11-16T06:25:16,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741891_1067 (size=15153) 2024-11-16T06:25:16,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741891_1067 (size=15153) 2024-11-16T06:25:16,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741892_1068 (size=8241) 2024-11-16T06:25:16,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741892_1068 (size=8241) 2024-11-16T06:25:16,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741892_1068 (size=8241) 2024-11-16T06:25:16,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741893_1069 (size=5706) 2024-11-16T06:25:16,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741893_1069 (size=5706) 2024-11-16T06:25:16,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741893_1069 (size=5706) 2024-11-16T06:25:16,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:40907 is added to blk_1073741894_1070 (size=5032) 2024-11-16T06:25:16,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741894_1070 (size=5032) 2024-11-16T06:25:16,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741894_1070 (size=5032) 2024-11-16T06:25:16,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741895_1071 (size=17461) 2024-11-16T06:25:16,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741895_1071 (size=17461) 2024-11-16T06:25:16,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741895_1071 (size=17461) 2024-11-16T06:25:16,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741896_1072 (size=464) 2024-11-16T06:25:16,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741896_1072 (size=464) 2024-11-16T06:25:16,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741896_1072 (size=464) 2024-11-16T06:25:16,456 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_2/usercache/jenkins/appcache/application_1731738285782_0001/container_1731738285782_0001_01_000002/launch_container.sh] 2024-11-16T06:25:16,456 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_2/usercache/jenkins/appcache/application_1731738285782_0001/container_1731738285782_0001_01_000002/container_tokens] 2024-11-16T06:25:16,456 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_2/usercache/jenkins/appcache/application_1731738285782_0001/container_1731738285782_0001_01_000002/sysfs] 2024-11-16T06:25:16,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741897_1073 (size=17461) 2024-11-16T06:25:16,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741897_1073 (size=17461) 2024-11-16T06:25:16,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741897_1073 (size=17461) 2024-11-16T06:25:16,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741898_1074 (size=349383) 2024-11-16T06:25:16,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741898_1074 (size=349383) 2024-11-16T06:25:16,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741898_1074 (size=349383) 2024-11-16T06:25:16,527 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0001_000001 (auth:SIMPLE) from 127.0.0.1:45042 2024-11-16T06:25:17,917 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-16T06:25:17,918 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 2024-11-16T06:25:17,928 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: testExportWithTargetName 2024-11-16T06:25:17,928 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-16T06:25:17,929 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-16T06:25:17,929 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2111949037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-11-16T06:25:17,930 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-11-16T06:25:17,930 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-11-16T06:25:17,930 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2111949037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738290525/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738290525/.hbase-snapshot/testExportWithTargetName 2024-11-16T06:25:17,931 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738290525/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-11-16T06:25:17,931 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738290525/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-11-16T06:25:17,944 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-11-16T06:25:17,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName 2024-11-16T06:25:17,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-16T06:25:17,954 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738317954"}]},"ts":"1731738317954"} 2024-11-16T06:25:17,957 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-11-16T06:25:17,957 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-11-16T06:25:17,960 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-11-16T06:25:17,966 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=29e4933fe7f7918fc359164fcdde2bce, UNASSIGN}, {pid=21, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=791faad6072c5de4c2a3b58f0311f4ed, UNASSIGN}] 2024-11-16T06:25:17,968 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=791faad6072c5de4c2a3b58f0311f4ed, UNASSIGN 2024-11-16T06:25:17,968 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=20, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=29e4933fe7f7918fc359164fcdde2bce, UNASSIGN 2024-11-16T06:25:17,971 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=20 updating hbase:meta row=29e4933fe7f7918fc359164fcdde2bce, regionState=CLOSING, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:25:17,971 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=21 updating hbase:meta row=791faad6072c5de4c2a3b58f0311f4ed, regionState=CLOSING, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:25:17,974 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=29e4933fe7f7918fc359164fcdde2bce, UNASSIGN because future has completed 2024-11-16T06:25:17,975 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-16T06:25:17,975 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=20, state=RUNNABLE, hasLock=false; CloseRegionProcedure 29e4933fe7f7918fc359164fcdde2bce, server=3456ee6a3164,35939,1731738278639}] 2024-11-16T06:25:17,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=21, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=791faad6072c5de4c2a3b58f0311f4ed, UNASSIGN because future has completed 
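The ExportSnapshot run traced above (loading the 'snaptb0-testExportWithTargetName' hfile list, a single export split of 33.3 K, and the "Export Completed: testExportWithTargetName" entry) is the standard snapshot export tool being driven from the test. A minimal sketch of how such an export can be launched programmatically is below; it is illustrative only, not the test's own code, and the -copy-to destination and mapper count are assumptions rather than values from this run. The tool is more commonly invoked from the shell as hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Export the source snapshot under a different target name, which is what
    // testExportWithTargetName exercises. The -copy-to URI is a placeholder.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithTargetName",
        "-target", "testExportWithTargetName",
        "-copy-to", "hdfs://namenode:8020/export-test",  // assumption: illustrative destination
        "-mappers", "1"
    });
    System.exit(rc);
  }
}

The single "export split=0" entry above corresponds to the one map task that copies the referenced hfiles to the destination filesystem before the export is finalized and verified.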
2024-11-16T06:25:17,980 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-16T06:25:17,980 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=21, state=RUNNABLE, hasLock=false; CloseRegionProcedure 791faad6072c5de4c2a3b58f0311f4ed, server=3456ee6a3164,37533,1731738278939}] 2024-11-16T06:25:18,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-16T06:25:18,136 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] handler.UnassignRegionHandler(122): Close 29e4933fe7f7918fc359164fcdde2bce 2024-11-16T06:25:18,136 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-16T06:25:18,136 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(122): Close 791faad6072c5de4c2a3b58f0311f4ed 2024-11-16T06:25:18,136 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-16T06:25:18,137 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing 791faad6072c5de4c2a3b58f0311f4ed, disabling compactions & flushes 2024-11-16T06:25:18,137 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1722): Closing 29e4933fe7f7918fc359164fcdde2bce, disabling compactions & flushes 2024-11-16T06:25:18,137 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce. 2024-11-16T06:25:18,137 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed. 2024-11-16T06:25:18,137 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce. 2024-11-16T06:25:18,137 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed. 2024-11-16T06:25:18,137 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce. after waiting 0 ms 2024-11-16T06:25:18,137 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed. 
after waiting 0 ms 2024-11-16T06:25:18,137 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce. 2024-11-16T06:25:18,137 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed. 2024-11-16T06:25:18,149 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/29e4933fe7f7918fc359164fcdde2bce/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T06:25:18,149 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/791faad6072c5de4c2a3b58f0311f4ed/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T06:25:18,153 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:25:18,153 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:25:18,154 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed. 2024-11-16T06:25:18,154 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce. 
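The DisableTableProcedure chain logged above (pid=18, with a CloseTableRegionsProcedure child, per-region TransitRegionStateProcedure UNASSIGN steps, and CloseRegionProcedure handlers on each region server) is the server-side trace of an ordinary table disable. A minimal client-side sketch of the call that triggers it, assuming a standard HBase 2.x Connection and not taken from the test itself:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithTargetName");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Submits a DisableTableProcedure on the master; that procedure schedules the
      // close/unassign subprocedures seen in the entries above and flips the table
      // state to DISABLING and then DISABLED in hbase:meta.
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);  // blocks until the procedure completes
      }
    }
  }
}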
2024-11-16T06:25:18,154 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1676): Region close journal for 29e4933fe7f7918fc359164fcdde2bce: Waiting for close lock at 1731738318137Running coprocessor pre-close hooks at 1731738318137Disabling compacts and flushes for region at 1731738318137Disabling writes for close at 1731738318137Writing region close event to WAL at 1731738318142 (+5 ms)Running coprocessor post-close hooks at 1731738318150 (+8 ms)Closed at 1731738318154 (+4 ms) 2024-11-16T06:25:18,154 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for 791faad6072c5de4c2a3b58f0311f4ed: Waiting for close lock at 1731738318137Running coprocessor pre-close hooks at 1731738318137Disabling compacts and flushes for region at 1731738318137Disabling writes for close at 1731738318137Writing region close event to WAL at 1731738318142 (+5 ms)Running coprocessor post-close hooks at 1731738318150 (+8 ms)Closed at 1731738318154 (+4 ms) 2024-11-16T06:25:18,166 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] handler.UnassignRegionHandler(157): Closed 29e4933fe7f7918fc359164fcdde2bce 2024-11-16T06:25:18,167 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=20 updating hbase:meta row=29e4933fe7f7918fc359164fcdde2bce, regionState=CLOSED 2024-11-16T06:25:18,168 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): Closed 791faad6072c5de4c2a3b58f0311f4ed 2024-11-16T06:25:18,170 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=21 updating hbase:meta row=791faad6072c5de4c2a3b58f0311f4ed, regionState=CLOSED 2024-11-16T06:25:18,172 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=20, state=RUNNABLE, hasLock=false; CloseRegionProcedure 29e4933fe7f7918fc359164fcdde2bce, server=3456ee6a3164,35939,1731738278639 because future has completed 2024-11-16T06:25:18,175 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=21, state=RUNNABLE, hasLock=false; CloseRegionProcedure 791faad6072c5de4c2a3b58f0311f4ed, server=3456ee6a3164,37533,1731738278939 because future has completed 2024-11-16T06:25:18,186 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=20 2024-11-16T06:25:18,187 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=20, state=SUCCESS, hasLock=false; CloseRegionProcedure 29e4933fe7f7918fc359164fcdde2bce, server=3456ee6a3164,35939,1731738278639 in 199 msec 2024-11-16T06:25:18,190 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=29e4933fe7f7918fc359164fcdde2bce, UNASSIGN in 221 msec 2024-11-16T06:25:18,190 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=21 2024-11-16T06:25:18,190 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=21, state=SUCCESS, hasLock=false; CloseRegionProcedure 791faad6072c5de4c2a3b58f0311f4ed, server=3456ee6a3164,37533,1731738278939 in 204 msec 2024-11-16T06:25:18,194 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): 
Finished subprocedure pid=21, resume processing ppid=19 2024-11-16T06:25:18,194 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, ppid=19, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=791faad6072c5de4c2a3b58f0311f4ed, UNASSIGN in 224 msec 2024-11-16T06:25:18,201 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-11-16T06:25:18,201 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 237 msec 2024-11-16T06:25:18,204 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738318203"}]},"ts":"1731738318203"} 2024-11-16T06:25:18,206 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-11-16T06:25:18,206 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-11-16T06:25:18,213 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 262 msec 2024-11-16T06:25:18,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-16T06:25:18,275 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-16T06:25:18,275 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-16T06:25:18,280 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-11-16T06:25:18,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=24, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-16T06:25:18,287 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=24, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-16T06:25:18,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-11-16T06:25:18,290 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=24, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-16T06:25:18,298 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-11-16T06:25:18,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, 
state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-16T06:25:18,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-16T06:25:18,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-16T06:25:18,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-16T06:25:18,304 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-16T06:25:18,305 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-16T06:25:18,305 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-16T06:25:18,305 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-16T06:25:18,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-16T06:25:18,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-16T06:25:18,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-16T06:25:18,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:18,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:18,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:18,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-16T06:25:18,307 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:18,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=24 2024-11-16T06:25:18,321 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/29e4933fe7f7918fc359164fcdde2bce 2024-11-16T06:25:18,328 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/29e4933fe7f7918fc359164fcdde2bce/cf, FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/29e4933fe7f7918fc359164fcdde2bce/recovered.edits] 2024-11-16T06:25:18,338 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/791faad6072c5de4c2a3b58f0311f4ed 2024-11-16T06:25:18,344 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/29e4933fe7f7918fc359164fcdde2bce/cf/c276b5e3853547a1b3a2addcbefbce44 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportWithTargetName/29e4933fe7f7918fc359164fcdde2bce/cf/c276b5e3853547a1b3a2addcbefbce44 2024-11-16T06:25:18,353 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/791faad6072c5de4c2a3b58f0311f4ed/cf, FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/791faad6072c5de4c2a3b58f0311f4ed/recovered.edits] 2024-11-16T06:25:18,354 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/29e4933fe7f7918fc359164fcdde2bce/recovered.edits/9.seqid to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportWithTargetName/29e4933fe7f7918fc359164fcdde2bce/recovered.edits/9.seqid 2024-11-16T06:25:18,357 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/29e4933fe7f7918fc359164fcdde2bce 2024-11-16T06:25:18,366 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/791faad6072c5de4c2a3b58f0311f4ed/cf/9db7dc092be741e5ba4e365efc0b231c to 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportWithTargetName/791faad6072c5de4c2a3b58f0311f4ed/cf/9db7dc092be741e5ba4e365efc0b231c 2024-11-16T06:25:18,371 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/791faad6072c5de4c2a3b58f0311f4ed/recovered.edits/9.seqid to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportWithTargetName/791faad6072c5de4c2a3b58f0311f4ed/recovered.edits/9.seqid 2024-11-16T06:25:18,372 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithTargetName/791faad6072c5de4c2a3b58f0311f4ed 2024-11-16T06:25:18,372 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-11-16T06:25:18,373 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-11-16T06:25:18,374 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf] 2024-11-16T06:25:18,379 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241116aaed6cfa727b460e8efaeac96ec76478_791faad6072c5de4c2a3b58f0311f4ed to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241116aaed6cfa727b460e8efaeac96ec76478_791faad6072c5de4c2a3b58f0311f4ed 2024-11-16T06:25:18,381 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241116d3ee7e4265bb47e893996a35b523a50a_29e4933fe7f7918fc359164fcdde2bce to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241116d3ee7e4265bb47e893996a35b523a50a_29e4933fe7f7918fc359164fcdde2bce 2024-11-16T06:25:18,382 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-11-16T06:25:18,385 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=24, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-16T06:25:18,391 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37533 {}] util.ReflectedFunctionCache(97): Populated cache for 
org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-16T06:25:18,397 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-11-16T06:25:18,402 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithTargetName' descriptor. 2024-11-16T06:25:18,404 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=24, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-16T06:25:18,405 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithTargetName' from region states. 2024-11-16T06:25:18,405 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731738318405"}]},"ts":"9223372036854775807"} 2024-11-16T06:25:18,405 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731738318405"}]},"ts":"9223372036854775807"} 2024-11-16T06:25:18,411 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-16T06:25:18,411 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 29e4933fe7f7918fc359164fcdde2bce, NAME => 'testtb-testExportWithTargetName,,1731738287886.29e4933fe7f7918fc359164fcdde2bce.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 791faad6072c5de4c2a3b58f0311f4ed, NAME => 'testtb-testExportWithTargetName,1,1731738287886.791faad6072c5de4c2a3b58f0311f4ed.', STARTKEY => '1', ENDKEY => ''}] 2024-11-16T06:25:18,411 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithTargetName' as deleted. 
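The DeleteTableProcedure entries above (HFileArchiver moving the region and mob store files into the archive, then removing the region rows and table descriptor from hbase:meta) and the snapshot deletions that follow correspond to ordinary Admin calls. A rough sketch, again assuming a plain Connection rather than the test harness used here:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CleanupSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithTargetName");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // DeleteTableProcedure: archives the region HFiles and removes the table's
      // rows and state from hbase:meta (the table must already be disabled).
      admin.deleteTable(table);
      // Drop the snapshots created during the test; this maps to the
      // SnapshotManager "Deleting snapshot" entries that follow.
      admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
      admin.deleteSnapshot("snaptb0-testExportWithTargetName");
    }
  }
}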
2024-11-16T06:25:18,412 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731738318411"}]},"ts":"9223372036854775807"} 2024-11-16T06:25:18,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=24 2024-11-16T06:25:18,418 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META 2024-11-16T06:25:18,420 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=24, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-16T06:25:18,426 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 139 msec 2024-11-16T06:25:18,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=24 2024-11-16T06:25:18,626 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-11-16T06:25:18,626 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-16T06:25:18,642 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-11-16T06:25:18,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-11-16T06:25:18,649 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" type: DISABLED 2024-11-16T06:25:18,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithTargetName 2024-11-16T06:25:18,688 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=770 (was 718) Potentially hanging thread: RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39073 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2071199890_1 at /127.0.0.1:53816 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (692179358) connection to localhost/127.0.0.1:46453 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (692179358) connection to localhost/127.0.0.1:39241 from 
jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for 
localhost/127.0.0.1:39241 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2111949037_22 at /127.0.0.1:35838 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 110031) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2111949037_22 at /127.0.0.1:40148 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) 
java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2071199890_1 at /127.0.0.1:35834 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46453 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1291 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2111949037_22 at /127.0.0.1:53840 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=807 (was 792) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=625 (was 262) - SystemLoadAverage LEAK? -, ProcessCount=19 (was 11) - ProcessCount LEAK? 
-, AvailableMemoryMB=1131 (was 3984) 2024-11-16T06:25:18,689 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=770 is superior to 500 2024-11-16T06:25:18,711 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=770, OpenFileDescriptor=807, MaxFileDescriptor=1048576, SystemLoadAverage=625, ProcessCount=19, AvailableMemoryMB=1129 2024-11-16T06:25:18,711 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=770 is superior to 500 2024-11-16T06:25:18,713 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T06:25:18,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=25, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-11-16T06:25:18,716 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T06:25:18,717 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 25 2024-11-16T06:25:18,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-16T06:25:18,719 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T06:25:18,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741899_1075 (size=440) 2024-11-16T06:25:18,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741899_1075 (size=440) 2024-11-16T06:25:18,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741899_1075 (size=440) 2024-11-16T06:25:18,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-16T06:25:19,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-16T06:25:19,157 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b8b204e287a13c4f6a6abd23bb44cb18, NAME => 'testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18.', STARTKEY => '', ENDKEY => '1'}, 
tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:25:19,158 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => c6bf8111ffadc92c646f6257cdec9554, NAME => 'testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:25:19,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741900_1076 (size=65) 2024-11-16T06:25:19,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741900_1076 (size=65) 2024-11-16T06:25:19,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741900_1076 (size=65) 2024-11-16T06:25:19,170 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:25:19,170 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing b8b204e287a13c4f6a6abd23bb44cb18, disabling compactions & flushes 2024-11-16T06:25:19,170 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18. 2024-11-16T06:25:19,170 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18. 2024-11-16T06:25:19,170 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18. after waiting 0 ms 2024-11-16T06:25:19,170 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18. 
2024-11-16T06:25:19,170 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18. 2024-11-16T06:25:19,170 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for b8b204e287a13c4f6a6abd23bb44cb18: Waiting for close lock at 1731738319170Disabling compacts and flushes for region at 1731738319170Disabling writes for close at 1731738319170Writing region close event to WAL at 1731738319170Closed at 1731738319170 2024-11-16T06:25:19,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741901_1077 (size=65) 2024-11-16T06:25:19,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741901_1077 (size=65) 2024-11-16T06:25:19,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741901_1077 (size=65) 2024-11-16T06:25:19,177 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:25:19,178 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing c6bf8111ffadc92c646f6257cdec9554, disabling compactions & flushes 2024-11-16T06:25:19,178 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554. 2024-11-16T06:25:19,178 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554. 2024-11-16T06:25:19,178 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554. after waiting 0 ms 2024-11-16T06:25:19,178 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554. 2024-11-16T06:25:19,178 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554. 
2024-11-16T06:25:19,178 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for c6bf8111ffadc92c646f6257cdec9554: Waiting for close lock at 1731738319178Disabling compacts and flushes for region at 1731738319178Disabling writes for close at 1731738319178Writing region close event to WAL at 1731738319178Closed at 1731738319178 2024-11-16T06:25:19,179 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T06:25:19,180 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731738319180"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731738319180"}]},"ts":"1731738319180"} 2024-11-16T06:25:19,180 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731738319180"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731738319180"}]},"ts":"1731738319180"} 2024-11-16T06:25:19,184 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-16T06:25:19,185 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T06:25:19,186 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738319186"}]},"ts":"1731738319186"} 2024-11-16T06:25:19,188 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-16T06:25:19,189 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {3456ee6a3164=0} racks are {/default-rack=0} 2024-11-16T06:25:19,190 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-16T06:25:19,190 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-16T06:25:19,190 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-16T06:25:19,190 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-16T06:25:19,190 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-16T06:25:19,190 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-16T06:25:19,191 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-16T06:25:19,191 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-16T06:25:19,191 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-16T06:25:19,191 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-16T06:25:19,191 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=b8b204e287a13c4f6a6abd23bb44cb18, ASSIGN}, {pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c6bf8111ffadc92c646f6257cdec9554, ASSIGN}] 2024-11-16T06:25:19,193 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c6bf8111ffadc92c646f6257cdec9554, ASSIGN 2024-11-16T06:25:19,193 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=b8b204e287a13c4f6a6abd23bb44cb18, ASSIGN 2024-11-16T06:25:19,194 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=b8b204e287a13c4f6a6abd23bb44cb18, ASSIGN; state=OFFLINE, location=3456ee6a3164,37533,1731738278939; forceNewPlan=false, retain=false 2024-11-16T06:25:19,194 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c6bf8111ffadc92c646f6257cdec9554, ASSIGN; state=OFFLINE, location=3456ee6a3164,35939,1731738278639; forceNewPlan=false, retain=false 2024-11-16T06:25:19,345 INFO [3456ee6a3164:39381 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-16T06:25:19,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-16T06:25:19,346 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=27 updating hbase:meta row=c6bf8111ffadc92c646f6257cdec9554, regionState=OPENING, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:25:19,346 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=26 updating hbase:meta row=b8b204e287a13c4f6a6abd23bb44cb18, regionState=OPENING, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:25:19,350 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c6bf8111ffadc92c646f6257cdec9554, ASSIGN because future has completed 2024-11-16T06:25:19,351 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=28, ppid=27, state=RUNNABLE, hasLock=false; OpenRegionProcedure c6bf8111ffadc92c646f6257cdec9554, server=3456ee6a3164,35939,1731738278639}] 2024-11-16T06:25:19,353 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=b8b204e287a13c4f6a6abd23bb44cb18, ASSIGN because future has completed 2024-11-16T06:25:19,354 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=26, state=RUNNABLE, hasLock=false; OpenRegionProcedure b8b204e287a13c4f6a6abd23bb44cb18, server=3456ee6a3164,37533,1731738278939}] 2024-11-16T06:25:19,509 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554. 2024-11-16T06:25:19,509 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(7752): Opening region: {ENCODED => c6bf8111ffadc92c646f6257cdec9554, NAME => 'testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554.', STARTKEY => '1', ENDKEY => ''} 2024-11-16T06:25:19,510 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554. service=AccessControlService 2024-11-16T06:25:19,510 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-16T06:25:19,510 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl c6bf8111ffadc92c646f6257cdec9554 2024-11-16T06:25:19,510 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:25:19,510 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(7794): checking encryption for c6bf8111ffadc92c646f6257cdec9554 2024-11-16T06:25:19,511 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(7797): checking classloading for c6bf8111ffadc92c646f6257cdec9554 2024-11-16T06:25:19,512 INFO [StoreOpener-c6bf8111ffadc92c646f6257cdec9554-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c6bf8111ffadc92c646f6257cdec9554 2024-11-16T06:25:19,512 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18. 2024-11-16T06:25:19,512 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7752): Opening region: {ENCODED => b8b204e287a13c4f6a6abd23bb44cb18, NAME => 'testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18.', STARTKEY => '', ENDKEY => '1'} 2024-11-16T06:25:19,513 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18. service=AccessControlService 2024-11-16T06:25:19,513 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-16T06:25:19,513 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl b8b204e287a13c4f6a6abd23bb44cb18 2024-11-16T06:25:19,513 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:25:19,513 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7794): checking encryption for b8b204e287a13c4f6a6abd23bb44cb18 2024-11-16T06:25:19,514 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7797): checking classloading for b8b204e287a13c4f6a6abd23bb44cb18 2024-11-16T06:25:19,514 INFO [StoreOpener-c6bf8111ffadc92c646f6257cdec9554-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c6bf8111ffadc92c646f6257cdec9554 columnFamilyName cf 2024-11-16T06:25:19,515 INFO [StoreOpener-b8b204e287a13c4f6a6abd23bb44cb18-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b8b204e287a13c4f6a6abd23bb44cb18 2024-11-16T06:25:19,516 DEBUG [StoreOpener-c6bf8111ffadc92c646f6257cdec9554-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:25:19,516 INFO [StoreOpener-c6bf8111ffadc92c646f6257cdec9554-1 {}] regionserver.HStore(327): Store=c6bf8111ffadc92c646f6257cdec9554/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:25:19,517 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1038): replaying wal for c6bf8111ffadc92c646f6257cdec9554 2024-11-16T06:25:19,518 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/c6bf8111ffadc92c646f6257cdec9554 2024-11-16T06:25:19,518 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/c6bf8111ffadc92c646f6257cdec9554 2024-11-16T06:25:19,519 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1048): stopping wal replay for c6bf8111ffadc92c646f6257cdec9554 2024-11-16T06:25:19,519 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1060): Cleaning up temporary data for c6bf8111ffadc92c646f6257cdec9554 2024-11-16T06:25:19,520 INFO [StoreOpener-b8b204e287a13c4f6a6abd23bb44cb18-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b8b204e287a13c4f6a6abd23bb44cb18 columnFamilyName cf 2024-11-16T06:25:19,521 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1093): writing seq id for c6bf8111ffadc92c646f6257cdec9554 2024-11-16T06:25:19,521 DEBUG [StoreOpener-b8b204e287a13c4f6a6abd23bb44cb18-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:25:19,522 INFO [StoreOpener-b8b204e287a13c4f6a6abd23bb44cb18-1 {}] regionserver.HStore(327): Store=b8b204e287a13c4f6a6abd23bb44cb18/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:25:19,523 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1038): replaying wal for b8b204e287a13c4f6a6abd23bb44cb18 2024-11-16T06:25:19,524 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/b8b204e287a13c4f6a6abd23bb44cb18 2024-11-16T06:25:19,524 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/b8b204e287a13c4f6a6abd23bb44cb18 2024-11-16T06:25:19,524 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/c6bf8111ffadc92c646f6257cdec9554/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:25:19,525 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1048): stopping wal replay for b8b204e287a13c4f6a6abd23bb44cb18 2024-11-16T06:25:19,525 DEBUG 
[RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1060): Cleaning up temporary data for b8b204e287a13c4f6a6abd23bb44cb18 2024-11-16T06:25:19,525 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1114): Opened c6bf8111ffadc92c646f6257cdec9554; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59617850, jitterRate=-0.1116248071193695}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:25:19,525 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c6bf8111ffadc92c646f6257cdec9554 2024-11-16T06:25:19,526 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1006): Region open journal for c6bf8111ffadc92c646f6257cdec9554: Running coprocessor pre-open hook at 1731738319511Writing region info on filesystem at 1731738319511Initializing all the Stores at 1731738319511Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738319512 (+1 ms)Cleaning up temporary data from old regions at 1731738319519 (+7 ms)Running coprocessor post-open hooks at 1731738319526 (+7 ms)Region opened successfully at 1731738319526 2024-11-16T06:25:19,527 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1093): writing seq id for b8b204e287a13c4f6a6abd23bb44cb18 2024-11-16T06:25:19,527 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554., pid=28, masterSystemTime=1731738319505 2024-11-16T06:25:19,531 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554. 2024-11-16T06:25:19,531 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554. 
2024-11-16T06:25:19,531 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/b8b204e287a13c4f6a6abd23bb44cb18/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:25:19,532 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=27 updating hbase:meta row=c6bf8111ffadc92c646f6257cdec9554, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:25:19,535 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1114): Opened b8b204e287a13c4f6a6abd23bb44cb18; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64460537, jitterRate=-0.03946314752101898}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:25:19,535 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b8b204e287a13c4f6a6abd23bb44cb18 2024-11-16T06:25:19,535 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1006): Region open journal for b8b204e287a13c4f6a6abd23bb44cb18: Running coprocessor pre-open hook at 1731738319514Writing region info on filesystem at 1731738319514Initializing all the Stores at 1731738319515 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738319515Cleaning up temporary data from old regions at 1731738319525 (+10 ms)Running coprocessor post-open hooks at 1731738319535 (+10 ms)Region opened successfully at 1731738319535 2024-11-16T06:25:19,537 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18., pid=29, masterSystemTime=1731738319508 2024-11-16T06:25:19,538 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=28, ppid=27, state=RUNNABLE, hasLock=false; OpenRegionProcedure c6bf8111ffadc92c646f6257cdec9554, server=3456ee6a3164,35939,1731738278639 because future has completed 2024-11-16T06:25:19,546 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18. 2024-11-16T06:25:19,547 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18. 
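For context, the entries above trace CreateTableProcedure pid=25 assigning and opening the two regions of testtb-testExportWithResetTtl (split at row key '1', one column family 'cf' with VERSIONS=1, IS_MOB=true, MOB_THRESHOLD=0). A minimal client-side sketch of creating such a pre-split table with the HBase Admin API is shown below; the table and family names are taken from the log, while the connection setup and the rest of the settings are assumptions, not part of this test run.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreatePresplitTable {
  public static void main(String[] args) throws Exception {
    // Names mirror the log; everything else here is an assumption.
    TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
    byte[][] splitKeys = { Bytes.toBytes("1") }; // regions ['', '1') and ['1', '')

    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(
          TableDescriptorBuilder.newBuilder(table)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)      // VERSIONS => '1' in the logged descriptor
                  .setMobEnabled(true)    // IS_MOB => 'true'
                  .setMobThreshold(0)     // MOB_THRESHOLD => '0'
                  .build())
              .build(),
          splitKeys);
    }
  }
}
```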
2024-11-16T06:25:19,549 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=26 updating hbase:meta row=b8b204e287a13c4f6a6abd23bb44cb18, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:25:19,551 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=28, resume processing ppid=27 2024-11-16T06:25:19,553 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=29, ppid=26, state=RUNNABLE, hasLock=false; OpenRegionProcedure b8b204e287a13c4f6a6abd23bb44cb18, server=3456ee6a3164,37533,1731738278939 because future has completed 2024-11-16T06:25:19,553 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, ppid=27, state=SUCCESS, hasLock=false; OpenRegionProcedure c6bf8111ffadc92c646f6257cdec9554, server=3456ee6a3164,35939,1731738278639 in 195 msec 2024-11-16T06:25:19,554 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=25, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c6bf8111ffadc92c646f6257cdec9554, ASSIGN in 360 msec 2024-11-16T06:25:19,558 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=29, resume processing ppid=26 2024-11-16T06:25:19,558 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=26, state=SUCCESS, hasLock=false; OpenRegionProcedure b8b204e287a13c4f6a6abd23bb44cb18, server=3456ee6a3164,37533,1731738278939 in 202 msec 2024-11-16T06:25:19,563 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=26, resume processing ppid=25 2024-11-16T06:25:19,563 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=25, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=b8b204e287a13c4f6a6abd23bb44cb18, ASSIGN in 367 msec 2024-11-16T06:25:19,564 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T06:25:19,565 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738319565"}]},"ts":"1731738319565"} 2024-11-16T06:25:19,569 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-16T06:25:19,571 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T06:25:19,573 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-11-16T06:25:19,579 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-16T06:25:19,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:19,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:19,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:19,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:19,584 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-16T06:25:19,584 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-16T06:25:19,584 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-16T06:25:19,585 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-16T06:25:19,588 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 872 msec 2024-11-16T06:25:19,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-16T06:25:19,856 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-16T06:25:19,856 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-16T06:25:19,861 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-11-16T06:25:19,861 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18. 
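The ACL activity above (PermissionStorage writing the row for testtb-testExportWithResetTtl and ZKPermissionWatcher pushing the /hbase/acl update to every region server) corresponds to the table owner 'jenkins' holding RWXCA on the new table. A hedged sketch of issuing and reading back such a grant with AccessControlClient, assuming a cluster with the AccessController coprocessor enabled as in this test:

```java
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class GrantTableAcl {
  public static void main(String[] args) throws Throwable {
    TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Grant RWXCA on the whole table (family and qualifier null),
      // matching the "jenkins: RWXCA" entry read from hbase:acl above.
      AccessControlClient.grant(conn, table, "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);

      // Read back the permissions PermissionStorage persisted for this table.
      List<UserPermission> perms =
          AccessControlClient.getUserPermissions(conn, table.getNameAsString());
      perms.forEach(System.out::println);
    }
  }
}
```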
2024-11-16T06:25:19,862 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T06:25:19,864 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-16T06:25:19,872 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-16T06:25:19,881 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-16T06:25:19,885 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-16T06:25:19,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731738319885 (current time:1731738319885). 2024-11-16T06:25:19,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-16T06:25:19,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-16T06:25:19,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-16T06:25:19,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59de0deb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:19,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:25:19,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:25:19,887 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:25:19,888 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:25:19,888 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:25:19,888 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48451504, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-11-16T06:25:19,888 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:25:19,888 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:25:19,888 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:19,889 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50126, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:25:19,890 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a8b91d0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:19,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:25:19,891 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:25:19,892 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:25:19,893 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39532, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:25:19,894 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 
2024-11-16T06:25:19,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:25:19,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:19,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:19,894 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:25:19,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@404afe20, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:19,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:25:19,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:25:19,896 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:25:19,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:25:19,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:25:19,897 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bed0ddf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:19,897 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:25:19,897 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:25:19,897 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:19,898 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50142, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:25:19,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@84877ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:19,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:25:19,900 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:25:19,900 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:25:19,901 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39538, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:25:19,903 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338., hostname=3456ee6a3164,35939,1731738278639, seqNum=2] 2024-11-16T06:25:19,904 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:25:19,905 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33076, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:25:19,906 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 
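The AsyncNonMetaRegionLocator entry above resolves which region server hosts the hbase:acl row for this table before the permissions are fetched. A small sketch of the equivalent explicit lookup from a client, assuming the standard RegionLocator API (the row key is simply the table name used as the ACL row, as in the log):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateAclRegion {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("hbase:acl"))) {
      // Which region (and server) holds the hbase:acl entry for this table.
      HRegionLocation loc =
          locator.getRegionLocation(Bytes.toBytes("testtb-testExportWithResetTtl"));
      System.out.println(loc.getRegion().getRegionNameAsString() + " @ " + loc.getServerName());
    }
  }
}
```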
2024-11-16T06:25:19,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:25:19,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:19,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:19,907 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:25:19,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-16T06:25:19,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
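At this point the master has finished validating the snapshot request (owner, TTL, ACLs copied into the snapshot description) and SnapshotManager finds no existing snapshot, so a SnapshotProcedure is about to be stored. All of the server-side work traced above is triggered by a single client call; a minimal sketch of that call, assuming the HBase 2.x+ Admin API:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot, as in the 'emptySnaptb0-testExportWithResetTtl'
      // request logged by MasterRpcServices above; the call blocks until the
      // SnapshotProcedure reports completion.
      admin.snapshot("emptySnaptb0-testExportWithResetTtl",
          TableName.valueOf("testtb-testExportWithResetTtl"),
          SnapshotType.FLUSH);
    }
  }
}
```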
2024-11-16T06:25:19,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=30, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-16T06:25:19,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 30 2024-11-16T06:25:19,910 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-16T06:25:19,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-16T06:25:19,912 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-16T06:25:19,915 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-16T06:25:19,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741902_1078 (size=161) 2024-11-16T06:25:19,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741902_1078 (size=161) 2024-11-16T06:25:19,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741902_1078 (size=161) 2024-11-16T06:25:19,924 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-16T06:25:19,924 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b8b204e287a13c4f6a6abd23bb44cb18}, {pid=32, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c6bf8111ffadc92c646f6257cdec9554}] 2024-11-16T06:25:19,926 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=31, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b8b204e287a13c4f6a6abd23bb44cb18 2024-11-16T06:25:19,926 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=32, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c6bf8111ffadc92c646f6257cdec9554 2024-11-16T06:25:20,014 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-16T06:25:20,078 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=31 2024-11-16T06:25:20,078 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=32 2024-11-16T06:25:20,078 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18. 2024-11-16T06:25:20,078 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554. 2024-11-16T06:25:20,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.HRegion(2603): Flush status journal for c6bf8111ffadc92c646f6257cdec9554: 2024-11-16T06:25:20,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.HRegion(2603): Flush status journal for b8b204e287a13c4f6a6abd23bb44cb18: 2024-11-16T06:25:20,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554. for emptySnaptb0-testExportWithResetTtl completed. 2024-11-16T06:25:20,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18. for emptySnaptb0-testExportWithResetTtl completed. 2024-11-16T06:25:20,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-16T06:25:20,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18.' 
region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-16T06:25:20,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:25:20,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:25:20,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-16T06:25:20,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-16T06:25:20,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741903_1079 (size=68) 2024-11-16T06:25:20,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741903_1079 (size=68) 2024-11-16T06:25:20,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741903_1079 (size=68) 2024-11-16T06:25:20,092 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554. 2024-11-16T06:25:20,093 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=32 2024-11-16T06:25:20,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=32 2024-11-16T06:25:20,093 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region c6bf8111ffadc92c646f6257cdec9554 2024-11-16T06:25:20,094 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=32, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c6bf8111ffadc92c646f6257cdec9554 2024-11-16T06:25:20,097 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=30, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c6bf8111ffadc92c646f6257cdec9554 in 171 msec 2024-11-16T06:25:20,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741904_1080 (size=68) 2024-11-16T06:25:20,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741904_1080 (size=68) 2024-11-16T06:25:20,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741904_1080 (size=68) 2024-11-16T06:25:20,107 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18. 
2024-11-16T06:25:20,107 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-16T06:25:20,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=31 2024-11-16T06:25:20,108 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region b8b204e287a13c4f6a6abd23bb44cb18 2024-11-16T06:25:20,108 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=31, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b8b204e287a13c4f6a6abd23bb44cb18 2024-11-16T06:25:20,111 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=31, resume processing ppid=30 2024-11-16T06:25:20,111 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-16T06:25:20,111 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, ppid=30, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b8b204e287a13c4f6a6abd23bb44cb18 in 185 msec 2024-11-16T06:25:20,112 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-16T06:25:20,113 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-16T06:25:20,113 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-16T06:25:20,114 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:25:20,114 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-16T06:25:20,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741905_1081 (size=60) 2024-11-16T06:25:20,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741905_1081 (size=60) 2024-11-16T06:25:20,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741905_1081 (size=60) 2024-11-16T06:25:20,124 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-16T06:25:20,124 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-11-16T06:25:20,125 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-11-16T06:25:20,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741906_1082 (size=641) 2024-11-16T06:25:20,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741906_1082 (size=641) 2024-11-16T06:25:20,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741906_1082 (size=641) 2024-11-16T06:25:20,139 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-16T06:25:20,146 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-16T06:25:20,147 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-11-16T06:25:20,149 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-16T06:25:20,149 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 30 2024-11-16T06:25:20,152 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 241 msec 2024-11-16T06:25:20,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-16T06:25:20,225 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-16T06:25:20,237 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37533 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18. with WAL disabled. Data may be lost in the event of a crash. 2024-11-16T06:25:20,243 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35939 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554. with WAL disabled. Data may be lost in the event of a crash. 2024-11-16T06:25:20,246 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-16T06:25:20,250 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-11-16T06:25:20,250 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18. 
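The two HRegion(8528) messages above come from the test loading rows into both regions with durability disabled, which is why the region servers warn that data may be lost in a crash. A hedged sketch of a client write that produces exactly this warning, assuming the standard Table/Put API (the row key, qualifier, and value below are made up for illustration):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithoutWal {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testtb-testExportWithResetTtl"))) {
      Put put = new Put(Bytes.toBytes("row-0"))  // hypothetical row key
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // Skip the write-ahead log; the region server then emits the
      // "writing data ... with WAL disabled" warning seen above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
```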
2024-11-16T06:25:20,251 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T06:25:20,254 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-16T06:25:20,264 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-16T06:25:20,273 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-16T06:25:20,277 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-16T06:25:20,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731738320277 (current time:1731738320277). 2024-11-16T06:25:20,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-16T06:25:20,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-16T06:25:20,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-16T06:25:20,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1133bc0d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:20,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:25:20,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:25:20,279 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:25:20,279 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:25:20,279 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:25:20,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25ab3129, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-11-16T06:25:20,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:25:20,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:25:20,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:20,282 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50164, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:25:20,282 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3070591d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:20,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:25:20,283 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:25:20,284 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:25:20,285 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39552, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:25:20,286 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 
2024-11-16T06:25:20,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:25:20,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:20,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:20,286 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:25:20,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4599a6f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:20,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:25:20,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:25:20,288 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:25:20,288 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:25:20,288 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:25:20,289 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c0462e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:20,289 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:25:20,289 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:25:20,289 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:20,290 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50180, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:25:20,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b8a6790, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:20,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:25:20,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:25:20,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:25:20,294 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39568, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:25:20,296 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338., hostname=3456ee6a3164,35939,1731738278639, seqNum=2] 2024-11-16T06:25:20,296 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:25:20,297 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33092, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:25:20,299 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 
2024-11-16T06:25:20,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:25:20,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:20,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:20,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-16T06:25:20,299 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:25:20,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
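The MasterRpcServices snapshot request logged above ({ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }) is what a client-side Admin.snapshot call produces. A minimal sketch, assuming a plain client Configuration pointing at this cluster (the class name is illustrative):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public final class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot: each online region flushes its memstore before its
      // store files are referenced, which is what the per-region flushes below show.
      admin.snapshot(new SnapshotDescription(
          "snaptb0-testExportWithResetTtl",
          TableName.valueOf("testtb-testExportWithResetTtl"),
          SnapshotType.FLUSH));
    }
  }
}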
2024-11-16T06:25:20,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=33, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-16T06:25:20,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 33 2024-11-16T06:25:20,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-16T06:25:20,303 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-16T06:25:20,304 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-16T06:25:20,307 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-16T06:25:20,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741907_1083 (size=156) 2024-11-16T06:25:20,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741907_1083 (size=156) 2024-11-16T06:25:20,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741907_1083 (size=156) 2024-11-16T06:25:20,317 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-16T06:25:20,317 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b8b204e287a13c4f6a6abd23bb44cb18}, {pid=35, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c6bf8111ffadc92c646f6257cdec9554}] 2024-11-16T06:25:20,318 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b8b204e287a13c4f6a6abd23bb44cb18 2024-11-16T06:25:20,319 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=35, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c6bf8111ffadc92c646f6257cdec9554 2024-11-16T06:25:20,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-16T06:25:20,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=34 2024-11-16T06:25:20,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=35 2024-11-16T06:25:20,471 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18. 2024-11-16T06:25:20,471 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegion(2902): Flushing b8b204e287a13c4f6a6abd23bb44cb18 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-11-16T06:25:20,472 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554. 2024-11-16T06:25:20,472 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegion(2902): Flushing c6bf8111ffadc92c646f6257cdec9554 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-11-16T06:25:20,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241116fd56b3801ccf4d2c9ca4dc0d34c59c61_b8b204e287a13c4f6a6abd23bb44cb18 is 71, key is 081b4ca1fbb26407d44f13f3d35c1a46/cf:q/1731738320237/Put/seqid=0 2024-11-16T06:25:20,499 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024111675a38be1ffb94fa0afa9687de687398b_c6bf8111ffadc92c646f6257cdec9554 is 71, key is 10d5eed45a815c07d5738c2867dae9a6/cf:q/1731738320242/Put/seqid=0 2024-11-16T06:25:20,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741908_1084 (size=5312) 2024-11-16T06:25:20,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741908_1084 (size=5312) 2024-11-16T06:25:20,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741908_1084 (size=5312) 2024-11-16T06:25:20,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:25:20,515 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241116fd56b3801ccf4d2c9ca4dc0d34c59c61_b8b204e287a13c4f6a6abd23bb44cb18 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241116fd56b3801ccf4d2c9ca4dc0d34c59c61_b8b204e287a13c4f6a6abd23bb44cb18 2024-11-16T06:25:20,517 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/b8b204e287a13c4f6a6abd23bb44cb18/.tmp/cf/ca89056e768d4de9a40cb01e68521bbc, store: [table=testtb-testExportWithResetTtl family=cf region=b8b204e287a13c4f6a6abd23bb44cb18] 2024-11-16T06:25:20,518 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/b8b204e287a13c4f6a6abd23bb44cb18/.tmp/cf/ca89056e768d4de9a40cb01e68521bbc is 206, key is 07bd449a5f10514c8acab3acbe35dd6c4/cf:q/1731738320237/Put/seqid=0 2024-11-16T06:25:20,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741909_1085 (size=7961) 2024-11-16T06:25:20,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741909_1085 (size=7961) 2024-11-16T06:25:20,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741909_1085 (size=7961) 2024-11-16T06:25:20,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:25:20,532 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024111675a38be1ffb94fa0afa9687de687398b_c6bf8111ffadc92c646f6257cdec9554 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b2024111675a38be1ffb94fa0afa9687de687398b_c6bf8111ffadc92c646f6257cdec9554 2024-11-16T06:25:20,533 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/c6bf8111ffadc92c646f6257cdec9554/.tmp/cf/aeb8d1d24c2e45d58b0b1240d7e07d9a, store: [table=testtb-testExportWithResetTtl family=cf region=c6bf8111ffadc92c646f6257cdec9554] 2024-11-16T06:25:20,534 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/c6bf8111ffadc92c646f6257cdec9554/.tmp/cf/aeb8d1d24c2e45d58b0b1240d7e07d9a is 206, key is 17b67b359ce9f408cef5249e5cf5ddd58/cf:q/1731738320242/Put/seqid=0 2024-11-16T06:25:20,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741910_1086 (size=6512) 2024-11-16T06:25:20,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741910_1086 (size=6512) 2024-11-16T06:25:20,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741910_1086 (size=6512) 2024-11-16T06:25:20,541 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=400, hasBloomFilter=true, into tmp file hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/b8b204e287a13c4f6a6abd23bb44cb18/.tmp/cf/ca89056e768d4de9a40cb01e68521bbc 2024-11-16T06:25:20,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741911_1087 (size=14247) 2024-11-16T06:25:20,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741911_1087 (size=14247) 2024-11-16T06:25:20,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741911_1087 (size=14247) 2024-11-16T06:25:20,550 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/c6bf8111ffadc92c646f6257cdec9554/.tmp/cf/aeb8d1d24c2e45d58b0b1240d7e07d9a 2024-11-16T06:25:20,550 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/b8b204e287a13c4f6a6abd23bb44cb18/.tmp/cf/ca89056e768d4de9a40cb01e68521bbc as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/b8b204e287a13c4f6a6abd23bb44cb18/cf/ca89056e768d4de9a40cb01e68521bbc 2024-11-16T06:25:20,559 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/c6bf8111ffadc92c646f6257cdec9554/.tmp/cf/aeb8d1d24c2e45d58b0b1240d7e07d9a as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/c6bf8111ffadc92c646f6257cdec9554/cf/aeb8d1d24c2e45d58b0b1240d7e07d9a 2024-11-16T06:25:20,560 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/b8b204e287a13c4f6a6abd23bb44cb18/cf/ca89056e768d4de9a40cb01e68521bbc, entries=6, sequenceid=6, filesize=6.4 K 2024-11-16T06:25:20,561 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegion(3140): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for b8b204e287a13c4f6a6abd23bb44cb18 in 90ms, sequenceid=6, compaction requested=false 2024-11-16T06:25:20,561 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-11-16T06:25:20,562 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegion(2603): Flush status journal for b8b204e287a13c4f6a6abd23bb44cb18: 2024-11-16T06:25:20,562 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18. for snaptb0-testExportWithResetTtl completed. 2024-11-16T06:25:20,562 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-16T06:25:20,562 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:25:20,562 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/b8b204e287a13c4f6a6abd23bb44cb18/cf/ca89056e768d4de9a40cb01e68521bbc] hfiles 2024-11-16T06:25:20,562 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/b8b204e287a13c4f6a6abd23bb44cb18/cf/ca89056e768d4de9a40cb01e68521bbc for snapshot=snaptb0-testExportWithResetTtl 2024-11-16T06:25:20,568 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/c6bf8111ffadc92c646f6257cdec9554/cf/aeb8d1d24c2e45d58b0b1240d7e07d9a, entries=44, sequenceid=6, filesize=13.9 K 2024-11-16T06:25:20,569 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegion(3140): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for c6bf8111ffadc92c646f6257cdec9554 in 97ms, sequenceid=6, compaction requested=false 2024-11-16T06:25:20,570 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegion(2603): Flush status journal 
for c6bf8111ffadc92c646f6257cdec9554: 2024-11-16T06:25:20,570 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554. for snaptb0-testExportWithResetTtl completed. 2024-11-16T06:25:20,570 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-16T06:25:20,570 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:25:20,570 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/c6bf8111ffadc92c646f6257cdec9554/cf/aeb8d1d24c2e45d58b0b1240d7e07d9a] hfiles 2024-11-16T06:25:20,570 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/c6bf8111ffadc92c646f6257cdec9554/cf/aeb8d1d24c2e45d58b0b1240d7e07d9a for snapshot=snaptb0-testExportWithResetTtl 2024-11-16T06:25:20,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741912_1088 (size=107) 2024-11-16T06:25:20,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741912_1088 (size=107) 2024-11-16T06:25:20,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741912_1088 (size=107) 2024-11-16T06:25:20,584 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18. 
2024-11-16T06:25:20,584 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=34 2024-11-16T06:25:20,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=34 2024-11-16T06:25:20,585 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region b8b204e287a13c4f6a6abd23bb44cb18 2024-11-16T06:25:20,586 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b8b204e287a13c4f6a6abd23bb44cb18 2024-11-16T06:25:20,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741913_1089 (size=107) 2024-11-16T06:25:20,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741913_1089 (size=107) 2024-11-16T06:25:20,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741913_1089 (size=107) 2024-11-16T06:25:20,591 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=33, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b8b204e287a13c4f6a6abd23bb44cb18 in 271 msec 2024-11-16T06:25:20,592 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554. 
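The recurring "Checking to see if procedure is done pid=33" entries are the blocking snapshot call polling the master for the SnapshotProcedure's completion. A sketch of the equivalent explicit polling with the async variant, assuming an Admin handle like the one in the earlier sketch (class name and polling interval are illustrative, not taken from the log):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

final class SnapshotPollSketch {
  // Submits the snapshot without blocking, then polls the master until the
  // snapshot procedure reports completion.
  static void takeSnapshotAndWait(Admin admin) throws Exception {
    SnapshotDescription desc = new SnapshotDescription(
        "snaptb0-testExportWithResetTtl",
        TableName.valueOf("testtb-testExportWithResetTtl"),
        SnapshotType.FLUSH);
    admin.snapshotAsync(desc);
    while (!admin.isSnapshotFinished(desc)) {
      Thread.sleep(200); // polling interval chosen for illustration only
    }
  }
}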
2024-11-16T06:25:20,592 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=35 2024-11-16T06:25:20,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=35 2024-11-16T06:25:20,593 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region c6bf8111ffadc92c646f6257cdec9554 2024-11-16T06:25:20,593 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=35, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c6bf8111ffadc92c646f6257cdec9554 2024-11-16T06:25:20,597 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=33 2024-11-16T06:25:20,598 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=33, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c6bf8111ffadc92c646f6257cdec9554 in 278 msec 2024-11-16T06:25:20,598 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-16T06:25:20,599 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-16T06:25:20,600 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-16T06:25:20,600 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-16T06:25:20,600 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:25:20,602 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b2024111675a38be1ffb94fa0afa9687de687398b_c6bf8111ffadc92c646f6257cdec9554, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241116fd56b3801ccf4d2c9ca4dc0d34c59c61_b8b204e287a13c4f6a6abd23bb44cb18] hfiles 2024-11-16T06:25:20,602 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b2024111675a38be1ffb94fa0afa9687de687398b_c6bf8111ffadc92c646f6257cdec9554 2024-11-16T06:25:20,602 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241116fd56b3801ccf4d2c9ca4dc0d34c59c61_b8b204e287a13c4f6a6abd23bb44cb18 2024-11-16T06:25:20,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741914_1090 (size=291) 2024-11-16T06:25:20,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741914_1090 (size=291) 2024-11-16T06:25:20,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741914_1090 (size=291) 2024-11-16T06:25:20,615 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-16T06:25:20,615 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-11-16T06:25:20,616 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-11-16T06:25:20,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-16T06:25:20,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741915_1091 (size=951) 2024-11-16T06:25:20,632 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741915_1091 (size=951) 2024-11-16T06:25:20,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741915_1091 (size=951) 2024-11-16T06:25:20,646 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-16T06:25:20,659 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-16T06:25:20,660 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-11-16T06:25:20,663 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-16T06:25:20,663 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 33 2024-11-16T06:25:20,667 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 363 msec 2024-11-16T06:25:20,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-16T06:25:20,935 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-16T06:25:20,937 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T06:25:20,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=36, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 2024-11-16T06:25:20,939 
INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T06:25:20,939 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 36 2024-11-16T06:25:20,940 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T06:25:20,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-16T06:25:20,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741916_1092 (size=433) 2024-11-16T06:25:20,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741916_1092 (size=433) 2024-11-16T06:25:20,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741916_1092 (size=433) 2024-11-16T06:25:20,963 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => ca11b343935cb24c3277af6ab0705703, NAME => 'testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:25:20,963 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 208eeab640ce5cbedd00f4a25021fc07, NAME => 'testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:25:20,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741918_1094 (size=58) 2024-11-16T06:25:20,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741918_1094 (size=58) 2024-11-16T06:25:20,983 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741918_1094 (size=58) 2024-11-16T06:25:20,983 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:25:20,983 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing ca11b343935cb24c3277af6ab0705703, disabling compactions & flushes 2024-11-16T06:25:20,983 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703. 2024-11-16T06:25:20,983 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703. 2024-11-16T06:25:20,983 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703. after waiting 0 ms 2024-11-16T06:25:20,983 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703. 2024-11-16T06:25:20,983 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703. 2024-11-16T06:25:20,984 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for ca11b343935cb24c3277af6ab0705703: Waiting for close lock at 1731738320983Disabling compacts and flushes for region at 1731738320983Disabling writes for close at 1731738320983Writing region close event to WAL at 1731738320983Closed at 1731738320983 2024-11-16T06:25:20,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741917_1093 (size=58) 2024-11-16T06:25:20,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741917_1093 (size=58) 2024-11-16T06:25:21,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741917_1093 (size=58) 2024-11-16T06:25:21,003 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:25:21,003 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 208eeab640ce5cbedd00f4a25021fc07, disabling compactions & flushes 2024-11-16T06:25:21,003 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07. 2024-11-16T06:25:21,003 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07. 
2024-11-16T06:25:21,003 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07. after waiting 0 ms 2024-11-16T06:25:21,003 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07. 2024-11-16T06:25:21,003 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07. 2024-11-16T06:25:21,003 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 208eeab640ce5cbedd00f4a25021fc07: Waiting for close lock at 1731738321003Disabling compacts and flushes for region at 1731738321003Disabling writes for close at 1731738321003Writing region close event to WAL at 1731738321003Closed at 1731738321003 2024-11-16T06:25:21,005 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T06:25:21,005 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1731738321005"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731738321005"}]},"ts":"1731738321005"} 2024-11-16T06:25:21,006 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1731738321005"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731738321005"}]},"ts":"1731738321005"} 2024-11-16T06:25:21,009 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
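The create request logged by HMaster above ('testExportWithResetTtl' with IS_MOB => 'true' and MOB_THRESHOLD => '0' on family cf) corresponds to building a table descriptor with a MOB-enabled column family and handing it to Admin.createTable. A minimal sketch, assuming the same Admin handle as earlier; only a few of the logged attributes are set explicitly, the rest fall back to HBase defaults (the class name is illustrative):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

final class CreateMobTableSketch {
  // Builds the 'cf' family with MOB enabled and threshold 0, matching the
  // attributes visible in the logged descriptor, then creates the table.
  static void createTable(Admin admin) throws Exception {
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("cf"))
        .setMobEnabled(true)
        .setMobThreshold(0L)
        .setMaxVersions(1)
        .build();
    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testExportWithResetTtl"))
        .setColumnFamily(cf)
        .build();
    admin.createTable(table);
  }
}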
2024-11-16T06:25:21,012 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T06:25:21,012 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738321012"}]},"ts":"1731738321012"} 2024-11-16T06:25:21,015 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-16T06:25:21,016 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {3456ee6a3164=0} racks are {/default-rack=0} 2024-11-16T06:25:21,021 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-16T06:25:21,021 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-16T06:25:21,021 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-16T06:25:21,021 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-16T06:25:21,021 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-16T06:25:21,021 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-16T06:25:21,021 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-16T06:25:21,021 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-16T06:25:21,021 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-16T06:25:21,021 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-16T06:25:21,022 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=208eeab640ce5cbedd00f4a25021fc07, ASSIGN}, {pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca11b343935cb24c3277af6ab0705703, ASSIGN}] 2024-11-16T06:25:21,025 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca11b343935cb24c3277af6ab0705703, ASSIGN 2024-11-16T06:25:21,025 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=208eeab640ce5cbedd00f4a25021fc07, ASSIGN 2024-11-16T06:25:21,027 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca11b343935cb24c3277af6ab0705703, ASSIGN; state=OFFLINE, location=3456ee6a3164,35939,1731738278639; forceNewPlan=false, retain=false 2024-11-16T06:25:21,028 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; 
TransitRegionStateProcedure table=testExportWithResetTtl, region=208eeab640ce5cbedd00f4a25021fc07, ASSIGN; state=OFFLINE, location=3456ee6a3164,40155,1731738278871; forceNewPlan=false, retain=false 2024-11-16T06:25:21,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-16T06:25:21,177 INFO [3456ee6a3164:39381 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-16T06:25:21,177 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=38 updating hbase:meta row=ca11b343935cb24c3277af6ab0705703, regionState=OPENING, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:25:21,177 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=37 updating hbase:meta row=208eeab640ce5cbedd00f4a25021fc07, regionState=OPENING, regionLocation=3456ee6a3164,40155,1731738278871 2024-11-16T06:25:21,180 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=208eeab640ce5cbedd00f4a25021fc07, ASSIGN because future has completed 2024-11-16T06:25:21,181 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=37, state=RUNNABLE, hasLock=false; OpenRegionProcedure 208eeab640ce5cbedd00f4a25021fc07, server=3456ee6a3164,40155,1731738278871}] 2024-11-16T06:25:21,182 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca11b343935cb24c3277af6ab0705703, ASSIGN because future has completed 2024-11-16T06:25:21,183 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=38, state=RUNNABLE, hasLock=false; OpenRegionProcedure ca11b343935cb24c3277af6ab0705703, server=3456ee6a3164,35939,1731738278639}] 2024-11-16T06:25:21,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-16T06:25:21,334 DEBUG [RSProcedureDispatcher-pool-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T06:25:21,336 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59971, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T06:25:21,340 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703. 2024-11-16T06:25:21,340 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7752): Opening region: {ENCODED => ca11b343935cb24c3277af6ab0705703, NAME => 'testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703.', STARTKEY => '1', ENDKEY => ''} 2024-11-16T06:25:21,340 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703. 
service=AccessControlService 2024-11-16T06:25:21,340 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-16T06:25:21,340 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07. 2024-11-16T06:25:21,341 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl ca11b343935cb24c3277af6ab0705703 2024-11-16T06:25:21,341 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:25:21,341 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7752): Opening region: {ENCODED => 208eeab640ce5cbedd00f4a25021fc07, NAME => 'testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07.', STARTKEY => '', ENDKEY => '1'} 2024-11-16T06:25:21,341 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7794): checking encryption for ca11b343935cb24c3277af6ab0705703 2024-11-16T06:25:21,341 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7797): checking classloading for ca11b343935cb24c3277af6ab0705703 2024-11-16T06:25:21,341 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07. service=AccessControlService 2024-11-16T06:25:21,341 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-16T06:25:21,341 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 208eeab640ce5cbedd00f4a25021fc07 2024-11-16T06:25:21,342 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:25:21,342 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7794): checking encryption for 208eeab640ce5cbedd00f4a25021fc07 2024-11-16T06:25:21,342 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7797): checking classloading for 208eeab640ce5cbedd00f4a25021fc07 2024-11-16T06:25:21,342 INFO [StoreOpener-ca11b343935cb24c3277af6ab0705703-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ca11b343935cb24c3277af6ab0705703 2024-11-16T06:25:21,344 INFO [StoreOpener-ca11b343935cb24c3277af6ab0705703-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ca11b343935cb24c3277af6ab0705703 columnFamilyName cf 2024-11-16T06:25:21,345 DEBUG [StoreOpener-ca11b343935cb24c3277af6ab0705703-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:25:21,345 INFO [StoreOpener-208eeab640ce5cbedd00f4a25021fc07-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 208eeab640ce5cbedd00f4a25021fc07 2024-11-16T06:25:21,345 INFO [StoreOpener-ca11b343935cb24c3277af6ab0705703-1 {}] regionserver.HStore(327): Store=ca11b343935cb24c3277af6ab0705703/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:25:21,346 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1038): replaying wal for ca11b343935cb24c3277af6ab0705703 2024-11-16T06:25:21,347 INFO [StoreOpener-208eeab640ce5cbedd00f4a25021fc07-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 208eeab640ce5cbedd00f4a25021fc07 columnFamilyName cf 2024-11-16T06:25:21,347 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/ca11b343935cb24c3277af6ab0705703 2024-11-16T06:25:21,347 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/ca11b343935cb24c3277af6ab0705703 2024-11-16T06:25:21,347 DEBUG [StoreOpener-208eeab640ce5cbedd00f4a25021fc07-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:25:21,347 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1048): stopping wal replay for ca11b343935cb24c3277af6ab0705703 2024-11-16T06:25:21,347 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1060): Cleaning up temporary data for ca11b343935cb24c3277af6ab0705703 2024-11-16T06:25:21,348 INFO [StoreOpener-208eeab640ce5cbedd00f4a25021fc07-1 {}] regionserver.HStore(327): Store=208eeab640ce5cbedd00f4a25021fc07/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:25:21,348 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1038): replaying wal for 208eeab640ce5cbedd00f4a25021fc07 2024-11-16T06:25:21,349 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/208eeab640ce5cbedd00f4a25021fc07 2024-11-16T06:25:21,349 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1093): writing seq id for ca11b343935cb24c3277af6ab0705703 2024-11-16T06:25:21,349 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/208eeab640ce5cbedd00f4a25021fc07 2024-11-16T06:25:21,349 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1048): stopping wal replay for 208eeab640ce5cbedd00f4a25021fc07 2024-11-16T06:25:21,349 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1060): Cleaning up temporary data for 
208eeab640ce5cbedd00f4a25021fc07 2024-11-16T06:25:21,351 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/ca11b343935cb24c3277af6ab0705703/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:25:21,352 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1114): Opened ca11b343935cb24c3277af6ab0705703; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71358921, jitterRate=0.06333078444004059}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:25:21,352 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ca11b343935cb24c3277af6ab0705703 2024-11-16T06:25:21,353 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1006): Region open journal for ca11b343935cb24c3277af6ab0705703: Running coprocessor pre-open hook at 1731738321341Writing region info on filesystem at 1731738321341Initializing all the Stores at 1731738321342 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738321342Cleaning up temporary data from old regions at 1731738321347 (+5 ms)Running coprocessor post-open hooks at 1731738321352 (+5 ms)Region opened successfully at 1731738321353 (+1 ms) 2024-11-16T06:25:21,354 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703., pid=40, masterSystemTime=1731738321336 2024-11-16T06:25:21,354 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1093): writing seq id for 208eeab640ce5cbedd00f4a25021fc07 2024-11-16T06:25:21,356 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703. 2024-11-16T06:25:21,356 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703. 
2024-11-16T06:25:21,357 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=38 updating hbase:meta row=ca11b343935cb24c3277af6ab0705703, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:25:21,360 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=38, state=RUNNABLE, hasLock=false; OpenRegionProcedure ca11b343935cb24c3277af6ab0705703, server=3456ee6a3164,35939,1731738278639 because future has completed 2024-11-16T06:25:21,367 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/208eeab640ce5cbedd00f4a25021fc07/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:25:21,367 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=40, resume processing ppid=38 2024-11-16T06:25:21,367 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=38, state=SUCCESS, hasLock=false; OpenRegionProcedure ca11b343935cb24c3277af6ab0705703, server=3456ee6a3164,35939,1731738278639 in 181 msec 2024-11-16T06:25:21,368 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1114): Opened 208eeab640ce5cbedd00f4a25021fc07; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62876918, jitterRate=-0.06306090950965881}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:25:21,368 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 208eeab640ce5cbedd00f4a25021fc07 2024-11-16T06:25:21,368 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1006): Region open journal for 208eeab640ce5cbedd00f4a25021fc07: Running coprocessor pre-open hook at 1731738321342Writing region info on filesystem at 1731738321342Initializing all the Stores at 1731738321343 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738321343Cleaning up temporary data from old regions at 1731738321349 (+6 ms)Running coprocessor post-open hooks at 1731738321368 (+19 ms)Region opened successfully at 1731738321368 2024-11-16T06:25:21,369 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07., pid=39, masterSystemTime=1731738321334 2024-11-16T06:25:21,370 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, ppid=36, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca11b343935cb24c3277af6ab0705703, ASSIGN in 345 msec 2024-11-16T06:25:21,373 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2266): Finished 
post open deploy task for testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07. 2024-11-16T06:25:21,373 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07. 2024-11-16T06:25:21,373 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=37 updating hbase:meta row=208eeab640ce5cbedd00f4a25021fc07, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,40155,1731738278871 2024-11-16T06:25:21,376 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=39, ppid=37, state=RUNNABLE, hasLock=false; OpenRegionProcedure 208eeab640ce5cbedd00f4a25021fc07, server=3456ee6a3164,40155,1731738278871 because future has completed 2024-11-16T06:25:21,380 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=37 2024-11-16T06:25:21,381 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=37, state=SUCCESS, hasLock=false; OpenRegionProcedure 208eeab640ce5cbedd00f4a25021fc07, server=3456ee6a3164,40155,1731738278871 in 197 msec 2024-11-16T06:25:21,384 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=37, resume processing ppid=36 2024-11-16T06:25:21,384 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, ppid=36, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=208eeab640ce5cbedd00f4a25021fc07, ASSIGN in 359 msec 2024-11-16T06:25:21,385 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T06:25:21,386 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738321385"}]},"ts":"1731738321385"} 2024-11-16T06:25:21,388 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-16T06:25:21,389 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T06:25:21,390 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-11-16T06:25:21,395 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-16T06:25:21,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:21,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:21,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:21,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:21,411 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-16T06:25:21,411 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-16T06:25:21,411 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-16T06:25:21,411 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-16T06:25:21,411 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-16T06:25:21,412 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-16T06:25:21,412 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-16T06:25:21,412 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-16T06:25:21,417 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 473 msec 2024-11-16T06:25:21,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-16T06:25:21,566 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 2024-11-16T06:25:21,566 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-16T06:25:21,569 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-11-16T06:25:21,569 DEBUG [Time-limited test {}] 
hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07. 2024-11-16T06:25:21,570 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T06:25:21,572 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-16T06:25:21,578 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-16T06:25:21,581 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:25:21,583 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39400, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:25:21,586 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-16T06:25:21,597 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40155 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07. with WAL disabled. Data may be lost in the event of a crash. 2024-11-16T06:25:21,599 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35939 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703. with WAL disabled. Data may be lost in the event of a crash. 2024-11-16T06:25:21,601 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-16T06:25:21,604 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-11-16T06:25:21,604 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07. 
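The entries up to this point cover the whole create-table flow recorded by the master and region servers: the MOB-enabled 'cf' family descriptor (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1'), the split at key '1' that yields the two regions, and the test writing rows into both regions with the WAL disabled. For orientation only, here is a minimal client-side sketch of Admin/Table calls that would produce a table of the same shape; this is not the test's actual code, and the class name, row keys and values are illustrative.

```java
// Hedged sketch: public HBase client calls that would create a table like the one
// logged above (MOB-enabled 'cf', max 1 version, pre-split at '1') and load some rows.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExportTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testExportWithResetTtl");

      // Column family matching the descriptor printed at region open:
      // IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1'.
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
          .setMobEnabled(true)
          .setMobThreshold(0L)
          .setMaxVersions(1)
          .build();
      TableDescriptor td = TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cf).build();

      // One split key '1' yields the two regions seen in the log: ['', '1') and ['1', '').
      admin.createTable(td, new byte[][] { Bytes.toBytes("1") });

      // Load a few rows so each region has data to flush when the snapshot runs.
      try (Table table = conn.getTable(tn)) {
        for (int i = 0; i < 10; i++) {
          Put put = new Put(Bytes.toBytes(String.format("%02d", i)));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
          table.put(put);
        }
      }
    }
  }
}
```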
2024-11-16T06:25:21,604 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T06:25:21,606 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-16T06:25:21,613 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-16T06:25:21,621 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-16T06:25:21,625 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-16T06:25:21,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731738321625 (current time:1731738321625). 2024-11-16T06:25:21,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-16T06:25:21,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-16T06:25:21,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d0e3c86, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:21,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:25:21,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:25:21,627 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:25:21,627 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:25:21,628 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:25:21,628 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f15a90b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:21,628 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:25:21,628 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:25:21,629 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:21,630 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50194, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:25:21,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b06fd83, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:21,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:25:21,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:25:21,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:25:21,635 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39580, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:25:21,637 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 
2024-11-16T06:25:21,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:25:21,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:21,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:21,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47317102, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:21,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:25:21,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:25:21,640 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:25:21,640 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:25:21,641 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:25:21,641 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-16T06:25:21,641 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44c43b90, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:21,641 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:25:21,642 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:25:21,642 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:21,643 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50222, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:25:21,644 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a2ccd50, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:21,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:25:21,646 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:25:21,647 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:25:21,650 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39596, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:25:21,653 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338., hostname=3456ee6a3164,35939,1731738278639, seqNum=2] 2024-11-16T06:25:21,653 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:25:21,655 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33096, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:25:21,657 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 
2024-11-16T06:25:21,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:25:21,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:21,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:21,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-16T06:25:21,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-16T06:25:21,659 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
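At this point the master has validated the snapshot request ({ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }), read the table ACL on behalf of the login user, and confirmed there is no existing snapshot with that name. A minimal sketch of the client call that triggers such a request follows; only the simple Admin.snapshot overload is shown, and it is assumed that the ttl=100000 seen in the log is supplied separately as a snapshot property (the shell's {TTL => ...} option) rather than through this overload.

```java
// Hedged sketch of a client-side FLUSH snapshot request like the one being validated here.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // For an enabled table this takes a flush-type snapshot: each region flushes its
      // memstore, then its hfiles are referenced in the snapshot manifest, matching the
      // per-region SnapshotRegionProcedure work that follows in the log.
      admin.snapshot("snaptb-testExportWithResetTtl",
          TableName.valueOf("testExportWithResetTtl"));
    }
  }
}
```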
2024-11-16T06:25:21,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=41, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-16T06:25:21,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 41 2024-11-16T06:25:21,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-16T06:25:21,662 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-11-16T06:25:21,663 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-16T06:25:21,667 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-16T06:25:21,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741919_1095 (size=143) 2024-11-16T06:25:21,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741919_1095 (size=143) 2024-11-16T06:25:21,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741919_1095 (size=143) 2024-11-16T06:25:21,682 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-16T06:25:21,682 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 208eeab640ce5cbedd00f4a25021fc07}, {pid=43, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ca11b343935cb24c3277af6ab0705703}] 2024-11-16T06:25:21,684 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=43, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ca11b343935cb24c3277af6ab0705703 2024-11-16T06:25:21,685 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=42, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 208eeab640ce5cbedd00f4a25021fc07 2024-11-16T06:25:21,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): 
Checking to see if procedure is done pid=41 2024-11-16T06:25:21,836 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=43 2024-11-16T06:25:21,837 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703. 2024-11-16T06:25:21,837 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=42 2024-11-16T06:25:21,837 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegion(2902): Flushing ca11b343935cb24c3277af6ab0705703 1/1 column families, dataSize=2.74 KB heapSize=6.16 KB 2024-11-16T06:25:21,838 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07. 2024-11-16T06:25:21,838 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegion(2902): Flushing 208eeab640ce5cbedd00f4a25021fc07 1/1 column families, dataSize=534 B heapSize=1.38 KB 2024-11-16T06:25:21,865 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411165c324be0e56f491f8baf96e52e12c152_ca11b343935cb24c3277af6ab0705703 is 71, key is 14580e3a08b82fada026b29abf834219/cf:q/1731738321599/Put/seqid=0 2024-11-16T06:25:21,870 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111662d71dc1120643058faab3ef5108407b_208eeab640ce5cbedd00f4a25021fc07 is 71, key is 001560db22f28e79e976ab856e394866/cf:q/1731738321597/Put/seqid=0 2024-11-16T06:25:21,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741920_1096 (size=5451) 2024-11-16T06:25:21,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741920_1096 (size=5451) 2024-11-16T06:25:21,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741920_1096 (size=5451) 2024-11-16T06:25:21,901 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:25:21,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741921_1097 (size=7821) 2024-11-16T06:25:21,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741921_1097 (size=7821) 2024-11-16T06:25:21,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741921_1097 (size=7821) 2024-11-16T06:25:21,931 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111662d71dc1120643058faab3ef5108407b_208eeab640ce5cbedd00f4a25021fc07 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e2024111662d71dc1120643058faab3ef5108407b_208eeab640ce5cbedd00f4a25021fc07 2024-11-16T06:25:21,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/208eeab640ce5cbedd00f4a25021fc07/.tmp/cf/f1882a7a10074820b9225ab14eeecdb6, store: [table=testExportWithResetTtl family=cf region=208eeab640ce5cbedd00f4a25021fc07] 2024-11-16T06:25:21,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/208eeab640ce5cbedd00f4a25021fc07/.tmp/cf/f1882a7a10074820b9225ab14eeecdb6 is 199, key is 07d531a4275dfe02255fda962c172eb11/cf:q/1731738321597/Put/seqid=0 2024-11-16T06:25:21,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741922_1098 (size=6859) 2024-11-16T06:25:21,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741922_1098 (size=6859) 2024-11-16T06:25:21,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741922_1098 (size=6859) 2024-11-16T06:25:21,959 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=534, hasBloomFilter=true, into tmp file hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/208eeab640ce5cbedd00f4a25021fc07/.tmp/cf/f1882a7a10074820b9225ab14eeecdb6 2024-11-16T06:25:21,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-16T06:25:21,978 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/208eeab640ce5cbedd00f4a25021fc07/.tmp/cf/f1882a7a10074820b9225ab14eeecdb6 as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/208eeab640ce5cbedd00f4a25021fc07/cf/f1882a7a10074820b9225ab14eeecdb6 2024-11-16T06:25:21,988 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/208eeab640ce5cbedd00f4a25021fc07/cf/f1882a7a10074820b9225ab14eeecdb6, entries=8, sequenceid=5, filesize=6.7 K 2024-11-16T06:25:21,992 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegion(3140): Finished flush of dataSize ~534 B/534, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 208eeab640ce5cbedd00f4a25021fc07 in 154ms, sequenceid=5, compaction requested=false 2024-11-16T06:25:21,992 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-11-16T06:25:21,993 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegion(2603): Flush status journal for 208eeab640ce5cbedd00f4a25021fc07: 2024-11-16T06:25:21,993 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07. for snaptb-testExportWithResetTtl completed. 2024-11-16T06:25:21,994 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-16T06:25:21,994 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:25:21,994 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/208eeab640ce5cbedd00f4a25021fc07/cf/f1882a7a10074820b9225ab14eeecdb6] hfiles 2024-11-16T06:25:21,994 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/208eeab640ce5cbedd00f4a25021fc07/cf/f1882a7a10074820b9225ab14eeecdb6 for snapshot=snaptb-testExportWithResetTtl 2024-11-16T06:25:22,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741923_1099 (size=100) 2024-11-16T06:25:22,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741923_1099 (size=100) 2024-11-16T06:25:22,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741923_1099 (size=100) 2024-11-16T06:25:22,059 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07. 
2024-11-16T06:25:22,059 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=42 2024-11-16T06:25:22,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=42 2024-11-16T06:25:22,060 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 208eeab640ce5cbedd00f4a25021fc07 2024-11-16T06:25:22,061 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=42, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 208eeab640ce5cbedd00f4a25021fc07 2024-11-16T06:25:22,064 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=41, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 208eeab640ce5cbedd00f4a25021fc07 in 380 msec 2024-11-16T06:25:22,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-16T06:25:22,306 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:25:22,313 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411165c324be0e56f491f8baf96e52e12c152_ca11b343935cb24c3277af6ab0705703 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b202411165c324be0e56f491f8baf96e52e12c152_ca11b343935cb24c3277af6ab0705703 2024-11-16T06:25:22,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/ca11b343935cb24c3277af6ab0705703/.tmp/cf/4993658b624f4663aa97716b5c8c2ac4, store: [table=testExportWithResetTtl family=cf region=ca11b343935cb24c3277af6ab0705703] 2024-11-16T06:25:22,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/ca11b343935cb24c3277af6ab0705703/.tmp/cf/4993658b624f4663aa97716b5c8c2ac4 is 199, key is 14bb5bd541ec65cf5b8dc451131f78e12/cf:q/1731738321599/Put/seqid=0 2024-11-16T06:25:22,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741924_1100 (size=13542) 2024-11-16T06:25:22,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741924_1100 (size=13542) 2024-11-16T06:25:22,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741924_1100 (size=13542) 
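The recurring "Checking to see if procedure is done pid=41" entries are the master answering the client's completion polls while the per-region flush and manifest steps run. A hedged sketch of that client-side submit-then-poll pattern is below, assuming the asynchronous Admin snapshot methods; it is an illustration, not the test's actual code.

```java
// Hedged sketch: submit a FLUSH snapshot asynchronously, then poll until the master
// reports the snapshot procedure as finished (the polling the log shows on pid=41).
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotPollSketch {
  // Assumes an already-open Admin handle; names mirror the snapshot in the log above.
  static void awaitSnapshot(Admin admin) throws Exception {
    SnapshotDescription desc = new SnapshotDescription(
        "snaptb-testExportWithResetTtl",
        TableName.valueOf("testExportWithResetTtl"),
        SnapshotType.FLUSH);
    admin.snapshotAsync(desc);                // returns once the procedure is submitted
    while (!admin.isSnapshotFinished(desc)) { // client-side completion poll
      Thread.sleep(200);
    }
  }
}
```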
2024-11-16T06:25:22,322 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=2.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/ca11b343935cb24c3277af6ab0705703/.tmp/cf/4993658b624f4663aa97716b5c8c2ac4 2024-11-16T06:25:22,328 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/ca11b343935cb24c3277af6ab0705703/.tmp/cf/4993658b624f4663aa97716b5c8c2ac4 as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/ca11b343935cb24c3277af6ab0705703/cf/4993658b624f4663aa97716b5c8c2ac4 2024-11-16T06:25:22,334 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/ca11b343935cb24c3277af6ab0705703/cf/4993658b624f4663aa97716b5c8c2ac4, entries=42, sequenceid=5, filesize=13.2 K 2024-11-16T06:25:22,335 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegion(3140): Finished flush of dataSize ~2.74 KB/2802, heapSize ~6.14 KB/6288, currentSize=0 B/0 for ca11b343935cb24c3277af6ab0705703 in 498ms, sequenceid=5, compaction requested=false 2024-11-16T06:25:22,336 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegion(2603): Flush status journal for ca11b343935cb24c3277af6ab0705703: 2024-11-16T06:25:22,336 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703. for snaptb-testExportWithResetTtl completed. 2024-11-16T06:25:22,336 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-16T06:25:22,336 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:25:22,336 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/ca11b343935cb24c3277af6ab0705703/cf/4993658b624f4663aa97716b5c8c2ac4] hfiles 2024-11-16T06:25:22,336 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/ca11b343935cb24c3277af6ab0705703/cf/4993658b624f4663aa97716b5c8c2ac4 for snapshot=snaptb-testExportWithResetTtl 2024-11-16T06:25:22,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741925_1101 (size=100) 2024-11-16T06:25:22,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741925_1101 (size=100) 2024-11-16T06:25:22,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741925_1101 (size=100) 2024-11-16T06:25:22,353 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703. 
2024-11-16T06:25:22,353 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=43 2024-11-16T06:25:22,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=43 2024-11-16T06:25:22,354 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region ca11b343935cb24c3277af6ab0705703 2024-11-16T06:25:22,354 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=43, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ca11b343935cb24c3277af6ab0705703 2024-11-16T06:25:22,363 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=43, resume processing ppid=41 2024-11-16T06:25:22,363 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=41, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ca11b343935cb24c3277af6ab0705703 in 673 msec 2024-11-16T06:25:22,363 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-16T06:25:22,406 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-16T06:25:22,408 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-16T06:25:22,408 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-16T06:25:22,408 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:25:22,409 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b202411165c324be0e56f491f8baf96e52e12c152_ca11b343935cb24c3277af6ab0705703, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e2024111662d71dc1120643058faab3ef5108407b_208eeab640ce5cbedd00f4a25021fc07] hfiles 2024-11-16T06:25:22,410 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b202411165c324be0e56f491f8baf96e52e12c152_ca11b343935cb24c3277af6ab0705703 2024-11-16T06:25:22,410 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e2024111662d71dc1120643058faab3ef5108407b_208eeab640ce5cbedd00f4a25021fc07 2024-11-16T06:25:22,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741926_1102 (size=284) 2024-11-16T06:25:22,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741926_1102 (size=284) 2024-11-16T06:25:22,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741926_1102 (size=284) 2024-11-16T06:25:22,429 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-16T06:25:22,429 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-11-16T06:25:22,431 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-16T06:25:22,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741927_1103 (size=923) 2024-11-16T06:25:22,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741927_1103 (size=923) 2024-11-16T06:25:22,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:40907 is added to blk_1073741927_1103 (size=923) 2024-11-16T06:25:22,474 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-16T06:25:22,496 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-16T06:25:22,501 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-16T06:25:22,503 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-11-16T06:25:22,503 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 41 2024-11-16T06:25:22,506 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 845 msec 2024-11-16T06:25:22,636 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_1/usercache/jenkins/appcache/application_1731738285782_0001/container_1731738285782_0001_01_000001/launch_container.sh] 2024-11-16T06:25:22,636 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_1/usercache/jenkins/appcache/application_1731738285782_0001/container_1731738285782_0001_01_000001/container_tokens] 2024-11-16T06:25:22,636 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_1/usercache/jenkins/appcache/application_1731738285782_0001/container_1731738285782_0001_01_000001/sysfs] 2024-11-16T06:25:22,642 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0001_000001 (auth:SIMPLE) from 127.0.0.1:45710 2024-11-16T06:25:22,794 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-16T06:25:22,795 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-11-16T06:25:22,809 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738322809 2024-11-16T06:25:22,809 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:36767, tgtDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738322809, rawTgtDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738322809, srcFsUri=hdfs://localhost:36767, srcDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:25:22,852 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:36767, inputRoot=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:25:22,852 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2111949037_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738322809, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738322809/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-16T06:25:22,855 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 
2024-11-16T06:25:22,867 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738322809/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-16T06:25:22,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741929_1105 (size=923) 2024-11-16T06:25:22,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741929_1105 (size=923) 2024-11-16T06:25:22,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741929_1105 (size=923) 2024-11-16T06:25:22,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741928_1104 (size=143) 2024-11-16T06:25:22,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741928_1104 (size=143) 2024-11-16T06:25:22,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741928_1104 (size=143) 2024-11-16T06:25:22,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741930_1106 (size=141) 2024-11-16T06:25:22,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741930_1106 (size=141) 2024-11-16T06:25:22,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741930_1106 (size=141) 2024-11-16T06:25:22,943 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:25:22,943 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:25:22,944 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:25:23,064 INFO [master/3456ee6a3164:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-16T06:25:23,065 INFO [master/3456ee6a3164:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-16T06:25:23,708 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-16T06:25:24,112 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop-11413415710700427601.jar 2024-11-16T06:25:24,113 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:25:24,113 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:25:24,201 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop-9479414260004557141.jar 2024-11-16T06:25:24,202 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:25:24,202 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:25:24,203 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:25:24,203 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:25:24,203 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:25:24,204 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:25:24,204 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-16T06:25:24,204 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-16T06:25:24,205 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-16T06:25:24,205 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-16T06:25:24,205 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-16T06:25:24,206 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-16T06:25:24,206 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-16T06:25:24,207 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-16T06:25:24,207 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-16T06:25:24,208 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-16T06:25:24,208 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-16T06:25:24,208 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:25:24,209 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-11-16T06:25:24,209 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:25:24,210 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:25:24,210 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:25:24,210 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:25:24,211 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:25:24,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741931_1107 (size=131440) 2024-11-16T06:25:24,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741931_1107 (size=131440) 2024-11-16T06:25:24,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741931_1107 (size=131440) 2024-11-16T06:25:24,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741932_1108 (size=4188619) 2024-11-16T06:25:24,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741932_1108 (size=4188619) 2024-11-16T06:25:24,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741932_1108 (size=4188619) 2024-11-16T06:25:24,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741933_1109 (size=440656) 2024-11-16T06:25:24,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741933_1109 (size=440656) 2024-11-16T06:25:24,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741933_1109 (size=440656) 2024-11-16T06:25:24,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741934_1110 (size=1323991) 2024-11-16T06:25:24,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741934_1110 (size=1323991) 2024-11-16T06:25:24,810 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741934_1110 (size=1323991) 2024-11-16T06:25:24,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741935_1111 (size=903732) 2024-11-16T06:25:24,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741935_1111 (size=903732) 2024-11-16T06:25:24,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741935_1111 (size=903732) 2024-11-16T06:25:24,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741936_1112 (size=8360083) 2024-11-16T06:25:24,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741936_1112 (size=8360083) 2024-11-16T06:25:24,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741936_1112 (size=8360083) 2024-11-16T06:25:24,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741937_1113 (size=6424738) 2024-11-16T06:25:24,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741937_1113 (size=6424738) 2024-11-16T06:25:24,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741937_1113 (size=6424738) 2024-11-16T06:25:24,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741938_1114 (size=1877034) 2024-11-16T06:25:24,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741938_1114 (size=1877034) 2024-11-16T06:25:24,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741938_1114 (size=1877034) 2024-11-16T06:25:24,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741939_1115 (size=77835) 2024-11-16T06:25:24,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741939_1115 (size=77835) 2024-11-16T06:25:24,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741939_1115 (size=77835) 2024-11-16T06:25:24,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741940_1116 (size=30949) 2024-11-16T06:25:24,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741940_1116 (size=30949) 2024-11-16T06:25:24,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741940_1116 (size=30949) 2024-11-16T06:25:24,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741941_1117 (size=1597327) 
2024-11-16T06:25:24,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741941_1117 (size=1597327) 2024-11-16T06:25:24,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741941_1117 (size=1597327) 2024-11-16T06:25:24,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741942_1118 (size=4695811) 2024-11-16T06:25:24,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741942_1118 (size=4695811) 2024-11-16T06:25:24,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741942_1118 (size=4695811) 2024-11-16T06:25:24,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741943_1119 (size=232957) 2024-11-16T06:25:24,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741943_1119 (size=232957) 2024-11-16T06:25:24,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741943_1119 (size=232957) 2024-11-16T06:25:24,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741944_1120 (size=127628) 2024-11-16T06:25:24,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741944_1120 (size=127628) 2024-11-16T06:25:24,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741944_1120 (size=127628) 2024-11-16T06:25:25,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741945_1121 (size=20406) 2024-11-16T06:25:25,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741945_1121 (size=20406) 2024-11-16T06:25:25,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741945_1121 (size=20406) 2024-11-16T06:25:25,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741946_1122 (size=5175431) 2024-11-16T06:25:25,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741946_1122 (size=5175431) 2024-11-16T06:25:25,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741946_1122 (size=5175431) 2024-11-16T06:25:25,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741947_1123 (size=217634) 2024-11-16T06:25:25,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741947_1123 (size=217634) 2024-11-16T06:25:25,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741947_1123 
(size=217634) 2024-11-16T06:25:25,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741948_1124 (size=1832290) 2024-11-16T06:25:25,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741948_1124 (size=1832290) 2024-11-16T06:25:25,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741948_1124 (size=1832290) 2024-11-16T06:25:25,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741949_1125 (size=322274) 2024-11-16T06:25:25,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741949_1125 (size=322274) 2024-11-16T06:25:25,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741949_1125 (size=322274) 2024-11-16T06:25:25,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741950_1126 (size=503880) 2024-11-16T06:25:25,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741950_1126 (size=503880) 2024-11-16T06:25:25,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741950_1126 (size=503880) 2024-11-16T06:25:25,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741951_1127 (size=29229) 2024-11-16T06:25:25,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741951_1127 (size=29229) 2024-11-16T06:25:25,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741951_1127 (size=29229) 2024-11-16T06:25:25,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741952_1128 (size=24096) 2024-11-16T06:25:25,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741952_1128 (size=24096) 2024-11-16T06:25:25,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741952_1128 (size=24096) 2024-11-16T06:25:25,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741953_1129 (size=111872) 2024-11-16T06:25:25,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741953_1129 (size=111872) 2024-11-16T06:25:25,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741953_1129 (size=111872) 2024-11-16T06:25:25,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741954_1130 (size=45609) 2024-11-16T06:25:25,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to 
blk_1073741954_1130 (size=45609) 2024-11-16T06:25:25,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741954_1130 (size=45609) 2024-11-16T06:25:25,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741955_1131 (size=136454) 2024-11-16T06:25:25,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741955_1131 (size=136454) 2024-11-16T06:25:25,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741955_1131 (size=136454) 2024-11-16T06:25:25,386 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-16T06:25:25,390 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-11-16T06:25:25,394 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=32.9 K 2024-11-16T06:25:25,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741956_1132 (size=686) 2024-11-16T06:25:25,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741956_1132 (size=686) 2024-11-16T06:25:25,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741956_1132 (size=686) 2024-11-16T06:25:25,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741957_1133 (size=15) 2024-11-16T06:25:25,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741957_1133 (size=15) 2024-11-16T06:25:25,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741957_1133 (size=15) 2024-11-16T06:25:25,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741958_1134 (size=303724) 2024-11-16T06:25:25,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741958_1134 (size=303724) 2024-11-16T06:25:25,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741958_1134 (size=303724) 2024-11-16T06:25:25,527 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-16T06:25:25,527 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-16T06:25:25,628 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0002_000001 (auth:SIMPLE) from 127.0.0.1:45716 2024-11-16T06:25:28,275 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-16T06:25:28,275 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-16T06:25:28,276 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-16T06:25:28,276 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-16T06:25:32,145 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0002_000001 (auth:SIMPLE) from 127.0.0.1:51006 2024-11-16T06:25:32,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741959_1135 (size=349374) 2024-11-16T06:25:32,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741959_1135 (size=349374) 2024-11-16T06:25:32,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741959_1135 (size=349374) 2024-11-16T06:25:34,436 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0002_000001 (auth:SIMPLE) from 127.0.0.1:37786 2024-11-16T06:25:36,745 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-16T06:25:38,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741960_1136 (size=13542) 2024-11-16T06:25:38,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741960_1136 (size=13542) 2024-11-16T06:25:38,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741960_1136 (size=13542) 2024-11-16T06:25:38,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741961_1137 (size=7821) 2024-11-16T06:25:38,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741961_1137 (size=7821) 2024-11-16T06:25:38,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741961_1137 (size=7821) 2024-11-16T06:25:38,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741962_1138 (size=6859) 2024-11-16T06:25:38,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741962_1138 (size=6859) 2024-11-16T06:25:38,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741962_1138 (size=6859) 2024-11-16T06:25:38,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741963_1139 (size=5451) 2024-11-16T06:25:38,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741963_1139 (size=5451) 2024-11-16T06:25:38,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741963_1139 (size=5451) 2024-11-16T06:25:38,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741964_1140 (size=17458) 2024-11-16T06:25:38,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741964_1140 (size=17458) 2024-11-16T06:25:38,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741964_1140 (size=17458) 2024-11-16T06:25:38,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741965_1141 (size=461) 2024-11-16T06:25:38,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741965_1141 (size=461) 2024-11-16T06:25:38,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741965_1141 (size=461) 2024-11-16T06:25:38,534 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_2/usercache/jenkins/appcache/application_1731738285782_0002/container_1731738285782_0002_01_000002/launch_container.sh] 2024-11-16T06:25:38,534 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_2/usercache/jenkins/appcache/application_1731738285782_0002/container_1731738285782_0002_01_000002/container_tokens] 2024-11-16T06:25:38,534 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_2/usercache/jenkins/appcache/application_1731738285782_0002/container_1731738285782_0002_01_000002/sysfs] 2024-11-16T06:25:38,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741966_1142 (size=17458) 2024-11-16T06:25:38,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741966_1142 (size=17458) 2024-11-16T06:25:38,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741966_1142 (size=17458) 2024-11-16T06:25:38,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741967_1143 (size=349374) 2024-11-16T06:25:38,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741967_1143 (size=349374) 2024-11-16T06:25:38,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741967_1143 (size=349374) 2024-11-16T06:25:38,581 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0002_000001 (auth:SIMPLE) from 127.0.0.1:37790 2024-11-16T06:25:39,703 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-16T06:25:39,704 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
2024-11-16T06:25:39,711 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb-testExportWithResetTtl 2024-11-16T06:25:39,711 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-16T06:25:39,712 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-16T06:25:39,712 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2111949037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-16T06:25:39,712 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-16T06:25:39,713 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-16T06:25:39,713 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2111949037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738322809/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738322809/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-16T06:25:39,713 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738322809/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-16T06:25:39,713 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738322809/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-16T06:25:39,721 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-11-16T06:25:39,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl 2024-11-16T06:25:39,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-16T06:25:39,725 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738339724"}]},"ts":"1731738339724"} 2024-11-16T06:25:39,727 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-11-16T06:25:39,727 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-11-16T06:25:39,728 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=45, ppid=44, 
state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-11-16T06:25:39,730 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=208eeab640ce5cbedd00f4a25021fc07, UNASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca11b343935cb24c3277af6ab0705703, UNASSIGN}] 2024-11-16T06:25:39,731 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca11b343935cb24c3277af6ab0705703, UNASSIGN 2024-11-16T06:25:39,732 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=208eeab640ce5cbedd00f4a25021fc07, UNASSIGN 2024-11-16T06:25:39,732 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=ca11b343935cb24c3277af6ab0705703, regionState=CLOSING, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:25:39,732 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=208eeab640ce5cbedd00f4a25021fc07, regionState=CLOSING, regionLocation=3456ee6a3164,40155,1731738278871 2024-11-16T06:25:39,735 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca11b343935cb24c3277af6ab0705703, UNASSIGN because future has completed 2024-11-16T06:25:39,735 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-16T06:25:39,735 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE, hasLock=false; CloseRegionProcedure ca11b343935cb24c3277af6ab0705703, server=3456ee6a3164,35939,1731738278639}] 2024-11-16T06:25:39,736 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=208eeab640ce5cbedd00f4a25021fc07, UNASSIGN because future has completed 2024-11-16T06:25:39,736 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-16T06:25:39,736 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=46, state=RUNNABLE, hasLock=false; CloseRegionProcedure 208eeab640ce5cbedd00f4a25021fc07, server=3456ee6a3164,40155,1731738278871}] 2024-11-16T06:25:39,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-16T06:25:39,888 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] handler.UnassignRegionHandler(122): Close ca11b343935cb24c3277af6ab0705703 2024-11-16T06:25:39,889 DEBUG 
[RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-16T06:25:39,889 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1722): Closing ca11b343935cb24c3277af6ab0705703, disabling compactions & flushes 2024-11-16T06:25:39,889 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703. 2024-11-16T06:25:39,889 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703. 2024-11-16T06:25:39,889 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703. after waiting 0 ms 2024-11-16T06:25:39,889 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703. 2024-11-16T06:25:39,889 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(122): Close 208eeab640ce5cbedd00f4a25021fc07 2024-11-16T06:25:39,889 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-16T06:25:39,890 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1722): Closing 208eeab640ce5cbedd00f4a25021fc07, disabling compactions & flushes 2024-11-16T06:25:39,890 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07. 2024-11-16T06:25:39,890 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07. 2024-11-16T06:25:39,890 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07. after waiting 0 ms 2024-11-16T06:25:39,890 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07. 
2024-11-16T06:25:39,896 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/ca11b343935cb24c3277af6ab0705703/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-16T06:25:39,896 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:25:39,896 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703. 2024-11-16T06:25:39,897 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1676): Region close journal for ca11b343935cb24c3277af6ab0705703: Waiting for close lock at 1731738339889Running coprocessor pre-close hooks at 1731738339889Disabling compacts and flushes for region at 1731738339889Disabling writes for close at 1731738339889Writing region close event to WAL at 1731738339890 (+1 ms)Running coprocessor post-close hooks at 1731738339896 (+6 ms)Closed at 1731738339896 2024-11-16T06:25:39,899 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] handler.UnassignRegionHandler(157): Closed ca11b343935cb24c3277af6ab0705703 2024-11-16T06:25:39,899 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=ca11b343935cb24c3277af6ab0705703, regionState=CLOSED 2024-11-16T06:25:39,901 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=48, ppid=47, state=RUNNABLE, hasLock=false; CloseRegionProcedure ca11b343935cb24c3277af6ab0705703, server=3456ee6a3164,35939,1731738278639 because future has completed 2024-11-16T06:25:39,904 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=47 2024-11-16T06:25:39,904 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=47, state=SUCCESS, hasLock=false; CloseRegionProcedure ca11b343935cb24c3277af6ab0705703, server=3456ee6a3164,35939,1731738278639 in 167 msec 2024-11-16T06:25:39,905 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca11b343935cb24c3277af6ab0705703, UNASSIGN in 174 msec 2024-11-16T06:25:39,910 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/208eeab640ce5cbedd00f4a25021fc07/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-16T06:25:39,910 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:25:39,911 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07. 
2024-11-16T06:25:39,911 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1676): Region close journal for 208eeab640ce5cbedd00f4a25021fc07: Waiting for close lock at 1731738339890Running coprocessor pre-close hooks at 1731738339890Disabling compacts and flushes for region at 1731738339890Disabling writes for close at 1731738339890Writing region close event to WAL at 1731738339891 (+1 ms)Running coprocessor post-close hooks at 1731738339910 (+19 ms)Closed at 1731738339911 (+1 ms) 2024-11-16T06:25:39,913 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(157): Closed 208eeab640ce5cbedd00f4a25021fc07 2024-11-16T06:25:39,913 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=208eeab640ce5cbedd00f4a25021fc07, regionState=CLOSED 2024-11-16T06:25:39,915 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=46, state=RUNNABLE, hasLock=false; CloseRegionProcedure 208eeab640ce5cbedd00f4a25021fc07, server=3456ee6a3164,40155,1731738278871 because future has completed 2024-11-16T06:25:39,918 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=46 2024-11-16T06:25:39,918 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=46, state=SUCCESS, hasLock=false; CloseRegionProcedure 208eeab640ce5cbedd00f4a25021fc07, server=3456ee6a3164,40155,1731738278871 in 180 msec 2024-11-16T06:25:39,920 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=46, resume processing ppid=45 2024-11-16T06:25:39,920 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=208eeab640ce5cbedd00f4a25021fc07, UNASSIGN in 188 msec 2024-11-16T06:25:39,922 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=45, resume processing ppid=44 2024-11-16T06:25:39,922 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=45, ppid=44, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 192 msec 2024-11-16T06:25:39,923 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738339923"}]},"ts":"1731738339923"} 2024-11-16T06:25:39,925 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-16T06:25:39,925 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-11-16T06:25:39,927 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 205 msec 2024-11-16T06:25:40,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-16T06:25:40,045 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-11-16T06:25:40,046 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 
delete testExportWithResetTtl 2024-11-16T06:25:40,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-11-16T06:25:40,051 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=50, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-16T06:25:40,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-11-16T06:25:40,057 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-11-16T06:25:40,063 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=50, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-16T06:25:40,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-16T06:25:40,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-16T06:25:40,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-16T06:25:40,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-16T06:25:40,066 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-16T06:25:40,067 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-16T06:25:40,067 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-16T06:25:40,067 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-16T06:25:40,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-16T06:25:40,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-16T06:25:40,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-16T06:25:40,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:40,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:40,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-16T06:25:40,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:40,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:40,071 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-16T06:25:40,071 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-16T06:25:40,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-16T06:25:40,072 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-16T06:25:40,073 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-16T06:25:40,077 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/208eeab640ce5cbedd00f4a25021fc07 2024-11-16T06:25:40,077 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/ca11b343935cb24c3277af6ab0705703 2024-11-16T06:25:40,081 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/208eeab640ce5cbedd00f4a25021fc07/cf, FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/208eeab640ce5cbedd00f4a25021fc07/recovered.edits] 2024-11-16T06:25:40,085 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/ca11b343935cb24c3277af6ab0705703/cf, FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/ca11b343935cb24c3277af6ab0705703/recovered.edits] 2024-11-16T06:25:40,091 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/208eeab640ce5cbedd00f4a25021fc07/cf/f1882a7a10074820b9225ab14eeecdb6 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testExportWithResetTtl/208eeab640ce5cbedd00f4a25021fc07/cf/f1882a7a10074820b9225ab14eeecdb6 2024-11-16T06:25:40,092 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/ca11b343935cb24c3277af6ab0705703/cf/4993658b624f4663aa97716b5c8c2ac4 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testExportWithResetTtl/ca11b343935cb24c3277af6ab0705703/cf/4993658b624f4663aa97716b5c8c2ac4 2024-11-16T06:25:40,095 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/208eeab640ce5cbedd00f4a25021fc07/recovered.edits/8.seqid to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testExportWithResetTtl/208eeab640ce5cbedd00f4a25021fc07/recovered.edits/8.seqid 2024-11-16T06:25:40,096 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/ca11b343935cb24c3277af6ab0705703/recovered.edits/8.seqid to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testExportWithResetTtl/ca11b343935cb24c3277af6ab0705703/recovered.edits/8.seqid 2024-11-16T06:25:40,096 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/ca11b343935cb24c3277af6ab0705703 2024-11-16T06:25:40,096 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportWithResetTtl/208eeab640ce5cbedd00f4a25021fc07 2024-11-16T06:25:40,096 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-11-16T06:25:40,097 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 
2024-11-16T06:25:40,098 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf] 2024-11-16T06:25:40,102 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b202411165c324be0e56f491f8baf96e52e12c152_ca11b343935cb24c3277af6ab0705703 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b202411165c324be0e56f491f8baf96e52e12c152_ca11b343935cb24c3277af6ab0705703 2024-11-16T06:25:40,103 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e2024111662d71dc1120643058faab3ef5108407b_208eeab640ce5cbedd00f4a25021fc07 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e2024111662d71dc1120643058faab3ef5108407b_208eeab640ce5cbedd00f4a25021fc07 2024-11-16T06:25:40,104 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-11-16T06:25:40,108 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=50, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-16T06:25:40,112 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-11-16T06:25:40,116 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testExportWithResetTtl' descriptor. 2024-11-16T06:25:40,118 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=50, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-16T06:25:40,118 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testExportWithResetTtl' from region states. 
2024-11-16T06:25:40,119 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731738340118"}]},"ts":"9223372036854775807"} 2024-11-16T06:25:40,119 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731738340118"}]},"ts":"9223372036854775807"} 2024-11-16T06:25:40,122 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-16T06:25:40,122 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 208eeab640ce5cbedd00f4a25021fc07, NAME => 'testExportWithResetTtl,,1731738320936.208eeab640ce5cbedd00f4a25021fc07.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => ca11b343935cb24c3277af6ab0705703, NAME => 'testExportWithResetTtl,1,1731738320936.ca11b343935cb24c3277af6ab0705703.', STARTKEY => '1', ENDKEY => ''}] 2024-11-16T06:25:40,122 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testExportWithResetTtl' as deleted. 2024-11-16T06:25:40,123 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731738340122"}]},"ts":"9223372036854775807"} 2024-11-16T06:25:40,125 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-11-16T06:25:40,127 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=50, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-16T06:25:40,129 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 81 msec 2024-11-16T06:25:40,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-16T06:25:40,176 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-11-16T06:25:40,176 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-11-16T06:25:40,177 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-11-16T06:25:40,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=51, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-11-16T06:25:40,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=51 2024-11-16T06:25:40,181 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738340181"}]},"ts":"1731738340181"} 2024-11-16T06:25:40,184 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 
2024-11-16T06:25:40,184 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-11-16T06:25:40,185 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-11-16T06:25:40,187 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=b8b204e287a13c4f6a6abd23bb44cb18, UNASSIGN}, {pid=54, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c6bf8111ffadc92c646f6257cdec9554, UNASSIGN}] 2024-11-16T06:25:40,188 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c6bf8111ffadc92c646f6257cdec9554, UNASSIGN 2024-11-16T06:25:40,189 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=b8b204e287a13c4f6a6abd23bb44cb18, UNASSIGN 2024-11-16T06:25:40,189 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=54 updating hbase:meta row=c6bf8111ffadc92c646f6257cdec9554, regionState=CLOSING, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:25:40,189 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=53 updating hbase:meta row=b8b204e287a13c4f6a6abd23bb44cb18, regionState=CLOSING, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:25:40,190 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39381 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=3456ee6a3164,37533,1731738278939, table=testtb-testExportWithResetTtl, region=b8b204e287a13c4f6a6abd23bb44cb18. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-11-16T06:25:40,192 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=54, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c6bf8111ffadc92c646f6257cdec9554, UNASSIGN because future has completed 2024-11-16T06:25:40,192 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-16T06:25:40,192 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=55, ppid=54, state=RUNNABLE, hasLock=false; CloseRegionProcedure c6bf8111ffadc92c646f6257cdec9554, server=3456ee6a3164,35939,1731738278639}] 2024-11-16T06:25:40,193 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=b8b204e287a13c4f6a6abd23bb44cb18, UNASSIGN because future has completed 2024-11-16T06:25:40,193 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-16T06:25:40,193 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=56, ppid=53, state=RUNNABLE, hasLock=false; CloseRegionProcedure b8b204e287a13c4f6a6abd23bb44cb18, server=3456ee6a3164,37533,1731738278939}] 2024-11-16T06:25:40,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=51 2024-11-16T06:25:40,345 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] handler.UnassignRegionHandler(122): Close c6bf8111ffadc92c646f6257cdec9554 2024-11-16T06:25:40,345 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-16T06:25:40,345 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1722): Closing c6bf8111ffadc92c646f6257cdec9554, disabling compactions & flushes 2024-11-16T06:25:40,345 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554. 2024-11-16T06:25:40,346 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554. 2024-11-16T06:25:40,346 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554. after waiting 0 ms 2024-11-16T06:25:40,346 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554. 
2024-11-16T06:25:40,347 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(122): Close b8b204e287a13c4f6a6abd23bb44cb18 2024-11-16T06:25:40,347 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-16T06:25:40,347 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1722): Closing b8b204e287a13c4f6a6abd23bb44cb18, disabling compactions & flushes 2024-11-16T06:25:40,348 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18. 2024-11-16T06:25:40,348 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18. 2024-11-16T06:25:40,348 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18. after waiting 0 ms 2024-11-16T06:25:40,348 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18. 2024-11-16T06:25:40,358 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/c6bf8111ffadc92c646f6257cdec9554/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T06:25:40,360 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:25:40,360 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554. 
2024-11-16T06:25:40,360 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1676): Region close journal for c6bf8111ffadc92c646f6257cdec9554: Waiting for close lock at 1731738340345Running coprocessor pre-close hooks at 1731738340345Disabling compacts and flushes for region at 1731738340345Disabling writes for close at 1731738340346 (+1 ms)Writing region close event to WAL at 1731738340352 (+6 ms)Running coprocessor post-close hooks at 1731738340360 (+8 ms)Closed at 1731738340360 2024-11-16T06:25:40,363 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] handler.UnassignRegionHandler(157): Closed c6bf8111ffadc92c646f6257cdec9554 2024-11-16T06:25:40,364 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=54 updating hbase:meta row=c6bf8111ffadc92c646f6257cdec9554, regionState=CLOSED 2024-11-16T06:25:40,365 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/b8b204e287a13c4f6a6abd23bb44cb18/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T06:25:40,368 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=55, ppid=54, state=RUNNABLE, hasLock=false; CloseRegionProcedure c6bf8111ffadc92c646f6257cdec9554, server=3456ee6a3164,35939,1731738278639 because future has completed 2024-11-16T06:25:40,369 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:25:40,369 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18. 
2024-11-16T06:25:40,369 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1676): Region close journal for b8b204e287a13c4f6a6abd23bb44cb18: Waiting for close lock at 1731738340347Running coprocessor pre-close hooks at 1731738340347Disabling compacts and flushes for region at 1731738340347Disabling writes for close at 1731738340348 (+1 ms)Writing region close event to WAL at 1731738340357 (+9 ms)Running coprocessor post-close hooks at 1731738340369 (+12 ms)Closed at 1731738340369 2024-11-16T06:25:40,372 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(157): Closed b8b204e287a13c4f6a6abd23bb44cb18 2024-11-16T06:25:40,373 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=53 updating hbase:meta row=b8b204e287a13c4f6a6abd23bb44cb18, regionState=CLOSED 2024-11-16T06:25:40,375 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=55, resume processing ppid=54 2024-11-16T06:25:40,375 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=54, state=SUCCESS, hasLock=false; CloseRegionProcedure c6bf8111ffadc92c646f6257cdec9554, server=3456ee6a3164,35939,1731738278639 in 178 msec 2024-11-16T06:25:40,376 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=56, ppid=53, state=RUNNABLE, hasLock=false; CloseRegionProcedure b8b204e287a13c4f6a6abd23bb44cb18, server=3456ee6a3164,37533,1731738278939 because future has completed 2024-11-16T06:25:40,380 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=52, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c6bf8111ffadc92c646f6257cdec9554, UNASSIGN in 188 msec 2024-11-16T06:25:40,383 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=56, resume processing ppid=53 2024-11-16T06:25:40,384 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, ppid=53, state=SUCCESS, hasLock=false; CloseRegionProcedure b8b204e287a13c4f6a6abd23bb44cb18, server=3456ee6a3164,37533,1731738278939 in 188 msec 2024-11-16T06:25:40,386 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=53, resume processing ppid=52 2024-11-16T06:25:40,386 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, ppid=52, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=b8b204e287a13c4f6a6abd23bb44cb18, UNASSIGN in 196 msec 2024-11-16T06:25:40,394 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=52, resume processing ppid=51 2024-11-16T06:25:40,394 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, ppid=51, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 204 msec 2024-11-16T06:25:40,398 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738340397"}]},"ts":"1731738340397"} 2024-11-16T06:25:40,401 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-16T06:25:40,401 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 
2024-11-16T06:25:40,408 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 226 msec 2024-11-16T06:25:40,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=51 2024-11-16T06:25:40,495 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-16T06:25:40,496 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-11-16T06:25:40,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=57, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-16T06:25:40,503 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=57, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-16T06:25:40,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-11-16T06:25:40,505 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=57, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-16T06:25:40,508 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-11-16T06:25:40,513 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/b8b204e287a13c4f6a6abd23bb44cb18 2024-11-16T06:25:40,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-16T06:25:40,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-16T06:25:40,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-16T06:25:40,515 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-16T06:25:40,515 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-16T06:25:40,515 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-16T06:25:40,516 DEBUG [Time-limited test-EventThread 
{}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-16T06:25:40,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-16T06:25:40,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:40,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-16T06:25:40,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-16T06:25:40,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:40,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:40,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:40,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=57 2024-11-16T06:25:40,518 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data null 2024-11-16T06:25:40,518 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-16T06:25:40,520 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/b8b204e287a13c4f6a6abd23bb44cb18/cf, FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/b8b204e287a13c4f6a6abd23bb44cb18/recovered.edits] 2024-11-16T06:25:40,525 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/b8b204e287a13c4f6a6abd23bb44cb18/cf/ca89056e768d4de9a40cb01e68521bbc to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportWithResetTtl/b8b204e287a13c4f6a6abd23bb44cb18/cf/ca89056e768d4de9a40cb01e68521bbc 
2024-11-16T06:25:40,531 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/b8b204e287a13c4f6a6abd23bb44cb18/recovered.edits/9.seqid to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportWithResetTtl/b8b204e287a13c4f6a6abd23bb44cb18/recovered.edits/9.seqid 2024-11-16T06:25:40,532 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/b8b204e287a13c4f6a6abd23bb44cb18 2024-11-16T06:25:40,533 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/c6bf8111ffadc92c646f6257cdec9554 2024-11-16T06:25:40,536 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/c6bf8111ffadc92c646f6257cdec9554/cf, FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/c6bf8111ffadc92c646f6257cdec9554/recovered.edits] 2024-11-16T06:25:40,542 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/c6bf8111ffadc92c646f6257cdec9554/cf/aeb8d1d24c2e45d58b0b1240d7e07d9a to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportWithResetTtl/c6bf8111ffadc92c646f6257cdec9554/cf/aeb8d1d24c2e45d58b0b1240d7e07d9a 2024-11-16T06:25:40,549 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/c6bf8111ffadc92c646f6257cdec9554/recovered.edits/9.seqid to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportWithResetTtl/c6bf8111ffadc92c646f6257cdec9554/recovered.edits/9.seqid 2024-11-16T06:25:40,551 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithResetTtl/c6bf8111ffadc92c646f6257cdec9554 2024-11-16T06:25:40,551 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-11-16T06:25:40,552 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-11-16T06:25:40,554 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf] 2024-11-16T06:25:40,560 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b2024111675a38be1ffb94fa0afa9687de687398b_c6bf8111ffadc92c646f6257cdec9554 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b2024111675a38be1ffb94fa0afa9687de687398b_c6bf8111ffadc92c646f6257cdec9554 2024-11-16T06:25:40,563 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241116fd56b3801ccf4d2c9ca4dc0d34c59c61_b8b204e287a13c4f6a6abd23bb44cb18 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241116fd56b3801ccf4d2c9ca4dc0d34c59c61_b8b204e287a13c4f6a6abd23bb44cb18 2024-11-16T06:25:40,565 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-11-16T06:25:40,570 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=57, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-16T06:25:40,574 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-11-16T06:25:40,578 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-11-16T06:25:40,580 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=57, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-16T06:25:40,580 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithResetTtl' from region states. 
2024-11-16T06:25:40,580 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731738340580"}]},"ts":"9223372036854775807"} 2024-11-16T06:25:40,580 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731738340580"}]},"ts":"9223372036854775807"} 2024-11-16T06:25:40,583 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-16T06:25:40,584 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => b8b204e287a13c4f6a6abd23bb44cb18, NAME => 'testtb-testExportWithResetTtl,,1731738318712.b8b204e287a13c4f6a6abd23bb44cb18.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => c6bf8111ffadc92c646f6257cdec9554, NAME => 'testtb-testExportWithResetTtl,1,1731738318712.c6bf8111ffadc92c646f6257cdec9554.', STARTKEY => '1', ENDKEY => ''}] 2024-11-16T06:25:40,584 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithResetTtl' as deleted. 2024-11-16T06:25:40,584 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731738340584"}]},"ts":"9223372036854775807"} 2024-11-16T06:25:40,587 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-11-16T06:25:40,588 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=57, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-16T06:25:40,591 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=57, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 92 msec 2024-11-16T06:25:40,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=57 2024-11-16T06:25:40,626 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-11-16T06:25:40,626 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-16T06:25:40,649 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-11-16T06:25:40,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-11-16T06:25:40,656 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" type: DISABLED 2024-11-16T06:25:40,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 2024-11-16T06:25:40,661 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: 
"snaptb0-testExportWithResetTtl" type: DISABLED 2024-11-16T06:25:40,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-11-16T06:25:40,711 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=783 (was 770) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2111949037_22 at /127.0.0.1:52906 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2174 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40557 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (692179358) connection to localhost/127.0.0.1:41855 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41855 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40307 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_288311573_1 at /127.0.0.1:35980 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 113875) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2111949037_22 at /127.0.0.1:42514 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_288311573_1 at /127.0.0.1:40582 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC 
Client (692179358) connection to localhost/127.0.0.1:40557 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2111949037_22 at /127.0.0.1:49006 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=819 (was 807) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=679 (was 625) - SystemLoadAverage LEAK? 
-, ProcessCount=19 (was 19), AvailableMemoryMB=550 (was 1129) 2024-11-16T06:25:40,711 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=783 is superior to 500 2024-11-16T06:25:40,736 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=783, OpenFileDescriptor=819, MaxFileDescriptor=1048576, SystemLoadAverage=679, ProcessCount=19, AvailableMemoryMB=544 2024-11-16T06:25:40,736 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=783 is superior to 500 2024-11-16T06:25:40,738 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T06:25:40,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=58, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-11-16T06:25:40,742 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T06:25:40,744 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 58 2024-11-16T06:25:40,746 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T06:25:40,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-16T06:25:40,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741968_1144 (size=443) 2024-11-16T06:25:40,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741968_1144 (size=443) 2024-11-16T06:25:40,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741968_1144 (size=443) 2024-11-16T06:25:40,788 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 9f7cf21eab98410c35305664c5b8052c, NAME => 'testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:25:40,791 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 04992db8c3a017304a8eea9e516323bf, NAME => 'testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:25:40,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741969_1145 (size=68) 2024-11-16T06:25:40,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741969_1145 (size=68) 2024-11-16T06:25:40,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-16T06:25:40,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741969_1145 (size=68) 2024-11-16T06:25:40,856 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:25:40,856 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 9f7cf21eab98410c35305664c5b8052c, disabling compactions & flushes 2024-11-16T06:25:40,856 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c. 2024-11-16T06:25:40,857 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c. 2024-11-16T06:25:40,857 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c. after waiting 0 ms 2024-11-16T06:25:40,857 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c. 2024-11-16T06:25:40,857 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c. 
2024-11-16T06:25:40,857 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 9f7cf21eab98410c35305664c5b8052c: Waiting for close lock at 1731738340856Disabling compacts and flushes for region at 1731738340856Disabling writes for close at 1731738340857 (+1 ms)Writing region close event to WAL at 1731738340857Closed at 1731738340857 2024-11-16T06:25:40,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741970_1146 (size=68) 2024-11-16T06:25:40,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741970_1146 (size=68) 2024-11-16T06:25:40,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741970_1146 (size=68) 2024-11-16T06:25:40,860 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:25:40,860 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 04992db8c3a017304a8eea9e516323bf, disabling compactions & flushes 2024-11-16T06:25:40,861 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf. 2024-11-16T06:25:40,861 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf. 2024-11-16T06:25:40,861 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf. after waiting 0 ms 2024-11-16T06:25:40,861 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf. 2024-11-16T06:25:40,861 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf. 
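The entries above show CreateTableProcedure pid=58 building 'testtb-testExportFileSystemState' with a single MOB-enabled column family 'cf' (MOB_THRESHOLD '0', VERSIONS '1') and two regions split at key '1'. As a hedged illustration only (not part of this log), a minimal client-side sketch of issuing an equivalent create request through the HBase Admin API might look as follows; the connection setup is an assumption (it relies on an hbase-site.xml on the classpath), and the class name is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml is on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemState");
      // Column family 'cf' with MOB enabled, threshold 0 and a single version,
      // mirroring the descriptor logged by the master for pid=58.
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .setMaxVersions(1)
              .build());
      // One split key ('1') yields the two regions seen in the log: '' .. '1' and '1' .. ''.
      byte[][] splits = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(td.build(), splits);
    }
  }
}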
2024-11-16T06:25:40,861 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 04992db8c3a017304a8eea9e516323bf: Waiting for close lock at 1731738340860Disabling compacts and flushes for region at 1731738340860Disabling writes for close at 1731738340861 (+1 ms)Writing region close event to WAL at 1731738340861Closed at 1731738340861 2024-11-16T06:25:40,862 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T06:25:40,863 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1731738340862"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731738340862"}]},"ts":"1731738340862"} 2024-11-16T06:25:40,863 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1731738340862"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731738340862"}]},"ts":"1731738340862"} 2024-11-16T06:25:40,866 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-16T06:25:40,868 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T06:25:40,869 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738340868"}]},"ts":"1731738340868"} 2024-11-16T06:25:40,873 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-11-16T06:25:40,874 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {3456ee6a3164=0} racks are {/default-rack=0} 2024-11-16T06:25:40,875 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-16T06:25:40,876 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-16T06:25:40,876 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-16T06:25:40,876 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-16T06:25:40,876 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-16T06:25:40,876 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-16T06:25:40,876 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-16T06:25:40,876 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-16T06:25:40,876 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-16T06:25:40,876 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-16T06:25:40,876 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9f7cf21eab98410c35305664c5b8052c, ASSIGN}, {pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=04992db8c3a017304a8eea9e516323bf, ASSIGN}] 2024-11-16T06:25:40,878 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9f7cf21eab98410c35305664c5b8052c, ASSIGN 2024-11-16T06:25:40,879 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=04992db8c3a017304a8eea9e516323bf, ASSIGN 2024-11-16T06:25:40,879 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9f7cf21eab98410c35305664c5b8052c, ASSIGN; state=OFFLINE, location=3456ee6a3164,35939,1731738278639; forceNewPlan=false, retain=false 2024-11-16T06:25:40,881 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=04992db8c3a017304a8eea9e516323bf, ASSIGN; state=OFFLINE, location=3456ee6a3164,40155,1731738278871; forceNewPlan=false, retain=false 2024-11-16T06:25:41,030 INFO [3456ee6a3164:39381 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-16T06:25:41,031 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=9f7cf21eab98410c35305664c5b8052c, regionState=OPENING, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:25:41,031 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=60 updating hbase:meta row=04992db8c3a017304a8eea9e516323bf, regionState=OPENING, regionLocation=3456ee6a3164,40155,1731738278871 2024-11-16T06:25:41,033 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9f7cf21eab98410c35305664c5b8052c, ASSIGN because future has completed 2024-11-16T06:25:41,033 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=59, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9f7cf21eab98410c35305664c5b8052c, server=3456ee6a3164,35939,1731738278639}] 2024-11-16T06:25:41,035 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=04992db8c3a017304a8eea9e516323bf, ASSIGN because future has completed 2024-11-16T06:25:41,035 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=62, ppid=60, state=RUNNABLE, hasLock=false; OpenRegionProcedure 04992db8c3a017304a8eea9e516323bf, server=3456ee6a3164,40155,1731738278871}] 2024-11-16T06:25:41,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-16T06:25:41,190 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c. 2024-11-16T06:25:41,190 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(7752): Opening region: {ENCODED => 9f7cf21eab98410c35305664c5b8052c, NAME => 'testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c.', STARTKEY => '', ENDKEY => '1'} 2024-11-16T06:25:41,191 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c. service=AccessControlService 2024-11-16T06:25:41,191 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-16T06:25:41,191 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 9f7cf21eab98410c35305664c5b8052c 2024-11-16T06:25:41,191 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:25:41,192 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf. 2024-11-16T06:25:41,192 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(7794): checking encryption for 9f7cf21eab98410c35305664c5b8052c 2024-11-16T06:25:41,192 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(7797): checking classloading for 9f7cf21eab98410c35305664c5b8052c 2024-11-16T06:25:41,192 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7752): Opening region: {ENCODED => 04992db8c3a017304a8eea9e516323bf, NAME => 'testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf.', STARTKEY => '1', ENDKEY => ''} 2024-11-16T06:25:41,192 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf. service=AccessControlService 2024-11-16T06:25:41,192 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-16T06:25:41,192 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 04992db8c3a017304a8eea9e516323bf 2024-11-16T06:25:41,192 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:25:41,192 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7794): checking encryption for 04992db8c3a017304a8eea9e516323bf 2024-11-16T06:25:41,192 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7797): checking classloading for 04992db8c3a017304a8eea9e516323bf 2024-11-16T06:25:41,193 INFO [StoreOpener-9f7cf21eab98410c35305664c5b8052c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9f7cf21eab98410c35305664c5b8052c 2024-11-16T06:25:41,194 INFO [StoreOpener-04992db8c3a017304a8eea9e516323bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 04992db8c3a017304a8eea9e516323bf 2024-11-16T06:25:41,196 INFO [StoreOpener-04992db8c3a017304a8eea9e516323bf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 04992db8c3a017304a8eea9e516323bf columnFamilyName cf 2024-11-16T06:25:41,196 INFO [StoreOpener-9f7cf21eab98410c35305664c5b8052c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9f7cf21eab98410c35305664c5b8052c columnFamilyName cf 2024-11-16T06:25:41,197 DEBUG [StoreOpener-04992db8c3a017304a8eea9e516323bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:25:41,197 DEBUG [StoreOpener-9f7cf21eab98410c35305664c5b8052c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:25:41,197 INFO [StoreOpener-04992db8c3a017304a8eea9e516323bf-1 {}] regionserver.HStore(327): Store=04992db8c3a017304a8eea9e516323bf/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:25:41,198 INFO [StoreOpener-9f7cf21eab98410c35305664c5b8052c-1 {}] regionserver.HStore(327): Store=9f7cf21eab98410c35305664c5b8052c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:25:41,198 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1038): replaying wal for 04992db8c3a017304a8eea9e516323bf 2024-11-16T06:25:41,198 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1038): replaying wal for 9f7cf21eab98410c35305664c5b8052c 2024-11-16T06:25:41,199 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/04992db8c3a017304a8eea9e516323bf 2024-11-16T06:25:41,199 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/9f7cf21eab98410c35305664c5b8052c 2024-11-16T06:25:41,199 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/04992db8c3a017304a8eea9e516323bf 2024-11-16T06:25:41,199 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/9f7cf21eab98410c35305664c5b8052c 2024-11-16T06:25:41,199 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1048): stopping wal replay for 04992db8c3a017304a8eea9e516323bf 2024-11-16T06:25:41,199 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1060): Cleaning up temporary data for 04992db8c3a017304a8eea9e516323bf 2024-11-16T06:25:41,200 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1048): stopping wal replay for 9f7cf21eab98410c35305664c5b8052c 2024-11-16T06:25:41,200 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1060): Cleaning up temporary data for 9f7cf21eab98410c35305664c5b8052c 2024-11-16T06:25:41,201 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 
{event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1093): writing seq id for 04992db8c3a017304a8eea9e516323bf 2024-11-16T06:25:41,201 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1093): writing seq id for 9f7cf21eab98410c35305664c5b8052c 2024-11-16T06:25:41,203 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/9f7cf21eab98410c35305664c5b8052c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:25:41,203 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/04992db8c3a017304a8eea9e516323bf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:25:41,204 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1114): Opened 04992db8c3a017304a8eea9e516323bf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65531463, jitterRate=-0.023505106568336487}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:25:41,204 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1114): Opened 9f7cf21eab98410c35305664c5b8052c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71811106, jitterRate=0.07006886601448059}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:25:41,204 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 04992db8c3a017304a8eea9e516323bf 2024-11-16T06:25:41,204 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9f7cf21eab98410c35305664c5b8052c 2024-11-16T06:25:41,205 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1006): Region open journal for 04992db8c3a017304a8eea9e516323bf: Running coprocessor pre-open hook at 1731738341193Writing region info on filesystem at 1731738341193Initializing all the Stores at 1731738341193Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738341193Cleaning up temporary data from old regions at 1731738341200 (+7 ms)Running coprocessor post-open hooks at 1731738341204 (+4 ms)Region opened successfully at 1731738341205 (+1 ms) 2024-11-16T06:25:41,205 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1006): Region open journal for 9f7cf21eab98410c35305664c5b8052c: Running coprocessor pre-open hook at 1731738341192Writing region info on filesystem at 
1731738341192Initializing all the Stores at 1731738341193 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738341193Cleaning up temporary data from old regions at 1731738341200 (+7 ms)Running coprocessor post-open hooks at 1731738341204 (+4 ms)Region opened successfully at 1731738341205 (+1 ms) 2024-11-16T06:25:41,206 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf., pid=62, masterSystemTime=1731738341188 2024-11-16T06:25:41,209 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c., pid=61, masterSystemTime=1731738341186 2024-11-16T06:25:41,209 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf. 2024-11-16T06:25:41,210 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf. 2024-11-16T06:25:41,210 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=60 updating hbase:meta row=04992db8c3a017304a8eea9e516323bf, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,40155,1731738278871 2024-11-16T06:25:41,211 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c. 2024-11-16T06:25:41,211 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c. 
2024-11-16T06:25:41,212 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=9f7cf21eab98410c35305664c5b8052c, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:25:41,214 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=62, ppid=60, state=RUNNABLE, hasLock=false; OpenRegionProcedure 04992db8c3a017304a8eea9e516323bf, server=3456ee6a3164,40155,1731738278871 because future has completed 2024-11-16T06:25:41,214 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=59, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9f7cf21eab98410c35305664c5b8052c, server=3456ee6a3164,35939,1731738278639 because future has completed 2024-11-16T06:25:41,220 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=62, resume processing ppid=60 2024-11-16T06:25:41,222 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=59 2024-11-16T06:25:41,225 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=59, state=SUCCESS, hasLock=false; OpenRegionProcedure 9f7cf21eab98410c35305664c5b8052c, server=3456ee6a3164,35939,1731738278639 in 184 msec 2024-11-16T06:25:41,227 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=58, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=04992db8c3a017304a8eea9e516323bf, ASSIGN in 344 msec 2024-11-16T06:25:41,227 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, ppid=60, state=SUCCESS, hasLock=false; OpenRegionProcedure 04992db8c3a017304a8eea9e516323bf, server=3456ee6a3164,40155,1731738278871 in 180 msec 2024-11-16T06:25:41,229 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=59, resume processing ppid=58 2024-11-16T06:25:41,229 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=58, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9f7cf21eab98410c35305664c5b8052c, ASSIGN in 349 msec 2024-11-16T06:25:41,231 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T06:25:41,231 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738341231"}]},"ts":"1731738341231"} 2024-11-16T06:25:41,235 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-11-16T06:25:41,236 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T06:25:41,236 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-11-16T06:25:41,241 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 
2024-11-16T06:25:41,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:41,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:41,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:41,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:41,246 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-16T06:25:41,246 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-16T06:25:41,247 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-16T06:25:41,248 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-16T06:25:41,251 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 510 msec 2024-11-16T06:25:41,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-16T06:25:41,376 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-16T06:25:41,376 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-16T06:25:41,384 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-11-16T06:25:41,384 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c. 
2024-11-16T06:25:41,385 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T06:25:41,388 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-16T06:25:41,396 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-16T06:25:41,402 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-16T06:25:41,405 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-16T06:25:41,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731738341405 (current time:1731738341405). 2024-11-16T06:25:41,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-16T06:25:41,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-16T06:25:41,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-16T06:25:41,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bfd9fdc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:41,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:25:41,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:25:41,408 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:25:41,408 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:25:41,408 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:25:41,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57703b66, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, 
fallbackAllowed=true, bind address=null 2024-11-16T06:25:41,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:25:41,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:25:41,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:41,410 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43178, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:25:41,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36c33a92, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:41,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:25:41,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:25:41,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:25:41,415 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38168, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:25:41,416 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 
2024-11-16T06:25:41,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:25:41,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:41,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:41,417 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:25:41,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4944ce52, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:41,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:25:41,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:25:41,419 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:25:41,419 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:25:41,419 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:25:41,419 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18e6f430, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:41,419 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:25:41,420 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:25:41,420 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:41,421 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43198, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:25:41,422 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9b1070c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:41,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:25:41,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:25:41,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:25:41,426 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38184, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:25:41,428 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338., hostname=3456ee6a3164,35939,1731738278639, seqNum=2] 2024-11-16T06:25:41,428 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:25:41,430 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43012, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:25:41,431 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 
2024-11-16T06:25:41,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:25:41,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:41,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:41,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-16T06:25:41,431 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:25:41,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
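The MasterRpcServices and SnapshotDescriptionUtils entries above record the master validating a client request for snapshot emptySnaptb0-testExportFileSystemState (type=FLUSH, ttl=0) before starting the SnapshotProcedure. A minimal sketch of how such a request is issued from the client side; for an enabled table the two-argument Admin.snapshot call produces a flush-type snapshot and blocks until the procedure completes, which is consistent with the repeated "Checking to see if procedure is done" polling below.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeEmptySnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Matches the "{ ss=emptySnaptb0-testExportFileSystemState ...
          // type=FLUSH ttl=0 }" request logged by MasterRpcServices above.
          admin.snapshot("emptySnaptb0-testExportFileSystemState",
              TableName.valueOf("testtb-testExportFileSystemState"));
        }
      }
    }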
2024-11-16T06:25:41,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-16T06:25:41,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 63 2024-11-16T06:25:41,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-16T06:25:41,435 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-16T06:25:41,436 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-16T06:25:41,439 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-16T06:25:41,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741971_1147 (size=170) 2024-11-16T06:25:41,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741971_1147 (size=170) 2024-11-16T06:25:41,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741971_1147 (size=170) 2024-11-16T06:25:41,447 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-16T06:25:41,447 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9f7cf21eab98410c35305664c5b8052c}, {pid=65, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 04992db8c3a017304a8eea9e516323bf}] 2024-11-16T06:25:41,448 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9f7cf21eab98410c35305664c5b8052c 2024-11-16T06:25:41,448 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 04992db8c3a017304a8eea9e516323bf 2024-11-16T06:25:41,545 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-16T06:25:41,596 DEBUG [master/3456ee6a3164:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region d0bbbb84adc872abb747d3f48a8c9338 changed from -1.0 to 0.0, refreshing cache 2024-11-16T06:25:41,597 DEBUG [master/3456ee6a3164:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 9f7cf21eab98410c35305664c5b8052c changed from -1.0 to 0.0, refreshing cache 2024-11-16T06:25:41,597 DEBUG [master/3456ee6a3164:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 04992db8c3a017304a8eea9e516323bf changed from -1.0 to 0.0, refreshing cache 2024-11-16T06:25:41,600 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=64 2024-11-16T06:25:41,600 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=65 2024-11-16T06:25:41,600 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c. 2024-11-16T06:25:41,600 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf. 2024-11-16T06:25:41,600 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.HRegion(2603): Flush status journal for 9f7cf21eab98410c35305664c5b8052c: 2024-11-16T06:25:41,600 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c. for emptySnaptb0-testExportFileSystemState completed. 2024-11-16T06:25:41,600 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.HRegion(2603): Flush status journal for 04992db8c3a017304a8eea9e516323bf: 2024-11-16T06:25:41,600 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf. for emptySnaptb0-testExportFileSystemState completed. 2024-11-16T06:25:41,600 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-16T06:25:41,601 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:25:41,601 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-16T06:25:41,601 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:25:41,601 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-16T06:25:41,601 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-16T06:25:41,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741972_1148 (size=71) 2024-11-16T06:25:41,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741973_1149 (size=71) 2024-11-16T06:25:41,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741972_1148 (size=71) 2024-11-16T06:25:41,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741973_1149 (size=71) 2024-11-16T06:25:41,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741973_1149 (size=71) 2024-11-16T06:25:41,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741972_1148 (size=71) 2024-11-16T06:25:41,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c. 
2024-11-16T06:25:41,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64 2024-11-16T06:25:41,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=64 2024-11-16T06:25:41,612 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 9f7cf21eab98410c35305664c5b8052c 2024-11-16T06:25:41,612 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9f7cf21eab98410c35305664c5b8052c 2024-11-16T06:25:41,615 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 9f7cf21eab98410c35305664c5b8052c in 166 msec 2024-11-16T06:25:41,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-16T06:25:42,012 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf. 2024-11-16T06:25:42,012 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=65 2024-11-16T06:25:42,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=65 2024-11-16T06:25:42,013 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 04992db8c3a017304a8eea9e516323bf 2024-11-16T06:25:42,013 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 04992db8c3a017304a8eea9e516323bf 2024-11-16T06:25:42,017 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=65, resume processing ppid=63 2024-11-16T06:25:42,017 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 04992db8c3a017304a8eea9e516323bf in 567 msec 2024-11-16T06:25:42,017 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-16T06:25:42,018 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-16T06:25:42,020 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-16T06:25:42,020 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-16T06:25:42,020 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:25:42,020 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-16T06:25:42,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741974_1150 (size=63) 2024-11-16T06:25:42,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741974_1150 (size=63) 2024-11-16T06:25:42,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741974_1150 (size=63) 2024-11-16T06:25:42,033 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-16T06:25:42,033 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-11-16T06:25:42,034 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-11-16T06:25:42,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741975_1151 (size=653) 2024-11-16T06:25:42,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741975_1151 (size=653) 2024-11-16T06:25:42,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741975_1151 (size=653) 2024-11-16T06:25:42,049 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-16T06:25:42,054 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-16T06:25:42,055 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-11-16T06:25:42,057 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-16T06:25:42,057 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 63 2024-11-16T06:25:42,058 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 625 msec 2024-11-16T06:25:42,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-16T06:25:42,065 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-16T06:25:42,071 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35939 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c. with WAL disabled. Data may be lost in the event of a crash. 2024-11-16T06:25:42,074 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40155 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf. with WAL disabled. Data may be lost in the event of a crash. 2024-11-16T06:25:42,076 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-16T06:25:42,079 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-11-16T06:25:42,079 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c. 
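The two HRegion(8528) entries above ("writing data to region ... with WAL disabled. Data may be lost in the event of a crash.") indicate the test client is loading rows with the write-ahead log skipped before taking the second snapshot. A hedged sketch of a single such write; the row key and value are placeholders, only the 'cf:q' column and the SKIP_WAL durability are taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LoadRowsSkipWal {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testtb-testExportFileSystemState"))) {
          // Placeholder row and value; the test generates its own keys.
          Put put = new Put(Bytes.toBytes("row-0"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-0"));
          // SKIP_WAL is what triggers the "with WAL disabled" warning above.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }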
2024-11-16T06:25:42,079 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T06:25:42,081 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-16T06:25:42,087 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-16T06:25:42,093 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-16T06:25:42,096 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-16T06:25:42,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731738342096 (current time:1731738342096). 2024-11-16T06:25:42,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-16T06:25:42,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-16T06:25:42,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-16T06:25:42,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ce19479, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:42,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:25:42,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:25:42,097 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:25:42,097 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:25:42,098 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:25:42,098 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@581b543c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-11-16T06:25:42,098 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:25:42,098 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:25:42,098 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:42,099 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43216, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:25:42,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32267bfa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:42,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:25:42,100 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:25:42,100 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:25:42,101 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38192, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:25:42,102 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 
2024-11-16T06:25:42,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:25:42,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:42,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:42,102 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:25:42,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e10dec0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:42,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:25:42,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:25:42,104 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:25:42,104 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:25:42,104 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:25:42,105 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e6063e7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:42,105 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:25:42,105 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:25:42,105 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:42,106 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43236, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:25:42,106 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b44223a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:42,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:25:42,108 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:25:42,108 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:25:42,109 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38204, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:25:42,110 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338., hostname=3456ee6a3164,35939,1731738278639, seqNum=2] 2024-11-16T06:25:42,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:25:42,112 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43020, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:25:42,113 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 
2024-11-16T06:25:42,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:25:42,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:42,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:42,113 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:25:42,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-16T06:25:42,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
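At this point the master has accepted the second request ("No existing snapshot, attempting snapshot...") and is about to store the SnapshotProcedure for snaptb0-testExportFileSystemState. As an illustration only, not the test's own verification code, a client could confirm both snapshots once pid=63 above and pid=66 below reach SUCCESS:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListCompletedSnapshots {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Expected to show emptySnaptb0-testExportFileSystemState and
          // snaptb0-testExportFileSystemState once both procedures finish.
          List<SnapshotDescription> snapshots = admin.listSnapshots();
          for (SnapshotDescription sd : snapshots) {
            System.out.println(sd.getName() + " table=" + sd.getTableName());
          }
        }
      }
    }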
2024-11-16T06:25:42,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=66, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-16T06:25:42,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 66 2024-11-16T06:25:42,116 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-16T06:25:42,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-16T06:25:42,117 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-16T06:25:42,120 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-16T06:25:42,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741976_1152 (size=165) 2024-11-16T06:25:42,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741976_1152 (size=165) 2024-11-16T06:25:42,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741976_1152 (size=165) 2024-11-16T06:25:42,127 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-16T06:25:42,127 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9f7cf21eab98410c35305664c5b8052c}, {pid=68, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 04992db8c3a017304a8eea9e516323bf}] 2024-11-16T06:25:42,128 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=67, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9f7cf21eab98410c35305664c5b8052c 2024-11-16T06:25:42,128 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=68, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 04992db8c3a017304a8eea9e516323bf 2024-11-16T06:25:42,225 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-16T06:25:42,280 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=67 2024-11-16T06:25:42,280 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=68 2024-11-16T06:25:42,280 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c. 2024-11-16T06:25:42,280 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegion(2902): Flushing 9f7cf21eab98410c35305664c5b8052c 1/1 column families, dataSize=65 B heapSize=400 B 2024-11-16T06:25:42,281 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf. 2024-11-16T06:25:42,282 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegion(2902): Flushing 04992db8c3a017304a8eea9e516323bf 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-11-16T06:25:42,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411162e66d6b8bebd49158edf453b0093e35a_9f7cf21eab98410c35305664c5b8052c is 69, key is 060018319189347e33029444e5dc7f899/cf:q/1731738342071/Put/seqid=0 2024-11-16T06:25:42,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741977_1153 (size=4964) 2024-11-16T06:25:42,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741977_1153 (size=4964) 2024-11-16T06:25:42,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741977_1153 (size=4964) 2024-11-16T06:25:42,313 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:25:42,314 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024111655f493897cc34b75978cb744e59cffd7_04992db8c3a017304a8eea9e516323bf is 71, key is 123246ad41f72c609c88c074853f8327/cf:q/1731738342074/Put/seqid=0 2024-11-16T06:25:42,320 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HMobStore(268): FLUSH 
Renaming flushed file from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411162e66d6b8bebd49158edf453b0093e35a_9f7cf21eab98410c35305664c5b8052c to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e202411162e66d6b8bebd49158edf453b0093e35a_9f7cf21eab98410c35305664c5b8052c 2024-11-16T06:25:42,322 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/9f7cf21eab98410c35305664c5b8052c/.tmp/cf/3957e206f01b4610bc0a051608cc9f12, store: [table=testtb-testExportFileSystemState family=cf region=9f7cf21eab98410c35305664c5b8052c] 2024-11-16T06:25:42,323 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/9f7cf21eab98410c35305664c5b8052c/.tmp/cf/3957e206f01b4610bc0a051608cc9f12 is 209, key is 060018319189347e33029444e5dc7f899/cf:q/1731738342071/Put/seqid=0 2024-11-16T06:25:42,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741978_1154 (size=8311) 2024-11-16T06:25:42,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741978_1154 (size=8311) 2024-11-16T06:25:42,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741978_1154 (size=8311) 2024-11-16T06:25:42,329 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:25:42,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741979_1155 (size=5504) 2024-11-16T06:25:42,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741979_1155 (size=5504) 2024-11-16T06:25:42,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741979_1155 (size=5504) 2024-11-16T06:25:42,337 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024111655f493897cc34b75978cb744e59cffd7_04992db8c3a017304a8eea9e516323bf to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b2024111655f493897cc34b75978cb744e59cffd7_04992db8c3a017304a8eea9e516323bf 2024-11-16T06:25:42,338 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 
{event_type=RS_SNAPSHOT_REGIONS, pid=67}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=65, hasBloomFilter=true, into tmp file hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/9f7cf21eab98410c35305664c5b8052c/.tmp/cf/3957e206f01b4610bc0a051608cc9f12 2024-11-16T06:25:42,338 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/04992db8c3a017304a8eea9e516323bf/.tmp/cf/9d4234f4c06846a989fb5e4ed6ee692f, store: [table=testtb-testExportFileSystemState family=cf region=04992db8c3a017304a8eea9e516323bf] 2024-11-16T06:25:42,339 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/04992db8c3a017304a8eea9e516323bf/.tmp/cf/9d4234f4c06846a989fb5e4ed6ee692f is 209, key is 161bcc4a5c9e9b16599a23db7b7d89616/cf:q/1731738342074/Put/seqid=0 2024-11-16T06:25:42,346 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/9f7cf21eab98410c35305664c5b8052c/.tmp/cf/3957e206f01b4610bc0a051608cc9f12 as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/9f7cf21eab98410c35305664c5b8052c/cf/3957e206f01b4610bc0a051608cc9f12 2024-11-16T06:25:42,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741980_1156 (size=15407) 2024-11-16T06:25:42,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741980_1156 (size=15407) 2024-11-16T06:25:42,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741980_1156 (size=15407) 2024-11-16T06:25:42,348 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/04992db8c3a017304a8eea9e516323bf/.tmp/cf/9d4234f4c06846a989fb5e4ed6ee692f 2024-11-16T06:25:42,353 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/9f7cf21eab98410c35305664c5b8052c/cf/3957e206f01b4610bc0a051608cc9f12, entries=1, sequenceid=6, filesize=5.4 K 2024-11-16T06:25:42,355 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/04992db8c3a017304a8eea9e516323bf/.tmp/cf/9d4234f4c06846a989fb5e4ed6ee692f as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/04992db8c3a017304a8eea9e516323bf/cf/9d4234f4c06846a989fb5e4ed6ee692f 2024-11-16T06:25:42,355 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegion(3140): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for 9f7cf21eab98410c35305664c5b8052c in 75ms, sequenceid=6, compaction requested=false 2024-11-16T06:25:42,356 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-11-16T06:25:42,357 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegion(2603): Flush status journal for 9f7cf21eab98410c35305664c5b8052c: 2024-11-16T06:25:42,357 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c. for snaptb0-testExportFileSystemState completed. 2024-11-16T06:25:42,357 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-16T06:25:42,357 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:25:42,357 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/9f7cf21eab98410c35305664c5b8052c/cf/3957e206f01b4610bc0a051608cc9f12] hfiles 2024-11-16T06:25:42,357 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/9f7cf21eab98410c35305664c5b8052c/cf/3957e206f01b4610bc0a051608cc9f12 for snapshot=snaptb0-testExportFileSystemState 2024-11-16T06:25:42,362 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/04992db8c3a017304a8eea9e516323bf/cf/9d4234f4c06846a989fb5e4ed6ee692f, entries=49, sequenceid=6, filesize=15.0 K 2024-11-16T06:25:42,363 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegion(3140): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for 04992db8c3a017304a8eea9e516323bf in 82ms, sequenceid=6, compaction 
requested=false 2024-11-16T06:25:42,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegion(2603): Flush status journal for 04992db8c3a017304a8eea9e516323bf: 2024-11-16T06:25:42,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf. for snaptb0-testExportFileSystemState completed. 2024-11-16T06:25:42,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-16T06:25:42,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:25:42,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/04992db8c3a017304a8eea9e516323bf/cf/9d4234f4c06846a989fb5e4ed6ee692f] hfiles 2024-11-16T06:25:42,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/04992db8c3a017304a8eea9e516323bf/cf/9d4234f4c06846a989fb5e4ed6ee692f for snapshot=snaptb0-testExportFileSystemState 2024-11-16T06:25:42,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741981_1157 (size=110) 2024-11-16T06:25:42,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741981_1157 (size=110) 2024-11-16T06:25:42,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741981_1157 (size=110) 2024-11-16T06:25:42,369 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c. 
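The entries above record a FLUSH-type snapshot: each region's memstore is flushed, the resulting hfiles (including the MOB files) are committed, and SnapshotManifest adds references to them for snaptb0-testExportFileSystemState. As a minimal, illustrative sketch only (not the test's own code), such a snapshot could be requested through the public HBase Admin API; the snapshot and table names are taken from the log, and a local hbase-site.xml on the classpath is assumed:

// Hedged sketch: request a FLUSH snapshot like the one the log records.
// Assumes an hbase-site.xml pointing at a running cluster is on the classpath.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // A FLUSH snapshot first flushes each region (the HRegion "Flushing ..."
      // entries above), then references the flushed hfiles in the manifest.
      admin.snapshot("snaptb0-testExportFileSystemState",
          TableName.valueOf("testtb-testExportFileSystemState"),
          SnapshotType.FLUSH);
    }
  }
}

This only illustrates the client-side call; the master-side SnapshotProcedure and per-region SnapshotRegionProcedure steps it triggers are exactly what the surrounding DEBUG/INFO entries show.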
2024-11-16T06:25:42,369 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=67 2024-11-16T06:25:42,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=67 2024-11-16T06:25:42,369 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 9f7cf21eab98410c35305664c5b8052c 2024-11-16T06:25:42,369 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=67, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9f7cf21eab98410c35305664c5b8052c 2024-11-16T06:25:42,371 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=66, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 9f7cf21eab98410c35305664c5b8052c in 243 msec 2024-11-16T06:25:42,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741982_1158 (size=110) 2024-11-16T06:25:42,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741982_1158 (size=110) 2024-11-16T06:25:42,375 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf. 2024-11-16T06:25:42,375 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-11-16T06:25:42,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741982_1158 (size=110) 2024-11-16T06:25:42,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=68 2024-11-16T06:25:42,376 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 04992db8c3a017304a8eea9e516323bf 2024-11-16T06:25:42,376 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=68, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 04992db8c3a017304a8eea9e516323bf 2024-11-16T06:25:42,379 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=68, resume processing ppid=66 2024-11-16T06:25:42,379 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, ppid=66, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 04992db8c3a017304a8eea9e516323bf in 250 msec 2024-11-16T06:25:42,379 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-16T06:25:42,380 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ 
ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-16T06:25:42,381 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-11-16T06:25:42,381 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-16T06:25:42,381 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:25:42,383 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b2024111655f493897cc34b75978cb744e59cffd7_04992db8c3a017304a8eea9e516323bf, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e202411162e66d6b8bebd49158edf453b0093e35a_9f7cf21eab98410c35305664c5b8052c] hfiles 2024-11-16T06:25:42,383 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b2024111655f493897cc34b75978cb744e59cffd7_04992db8c3a017304a8eea9e516323bf 2024-11-16T06:25:42,383 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e202411162e66d6b8bebd49158edf453b0093e35a_9f7cf21eab98410c35305664c5b8052c 2024-11-16T06:25:42,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741983_1159 (size=294) 2024-11-16T06:25:42,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741983_1159 (size=294) 2024-11-16T06:25:42,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741983_1159 (size=294) 2024-11-16T06:25:42,392 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-16T06:25:42,392 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-11-16T06:25:42,393 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-16T06:25:42,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is 
added to blk_1073741984_1160 (size=963) 2024-11-16T06:25:42,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741984_1160 (size=963) 2024-11-16T06:25:42,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741984_1160 (size=963) 2024-11-16T06:25:42,409 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-16T06:25:42,416 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-16T06:25:42,416 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-16T06:25:42,418 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-16T06:25:42,418 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 66 2024-11-16T06:25:42,420 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 304 msec 2024-11-16T06:25:42,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-16T06:25:42,435 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-16T06:25:42,436 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738342436 2024-11-16T06:25:42,436 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:36767, tgtDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738342436, rawTgtDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738342436, srcFsUri=hdfs://localhost:36767, srcDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:25:42,470 DEBUG [Time-limited test 
{}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:36767, inputRoot=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:25:42,470 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2111949037_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738342436, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738342436/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-16T06:25:42,472 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-16T06:25:42,488 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738342436/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-16T06:25:42,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741985_1161 (size=165) 2024-11-16T06:25:42,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741985_1161 (size=165) 2024-11-16T06:25:42,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741985_1161 (size=165) 2024-11-16T06:25:42,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741986_1162 (size=963) 2024-11-16T06:25:42,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741986_1162 (size=963) 2024-11-16T06:25:42,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741986_1162 (size=963) 2024-11-16T06:25:42,515 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:25:42,515 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:25:42,515 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:25:43,669 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop-15964869230530003521.jar 2024-11-16T06:25:43,670 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:25:43,670 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:25:43,756 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop-4655582595304228337.jar 2024-11-16T06:25:43,757 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:25:43,757 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:25:43,757 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:25:43,758 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:25:43,758 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:25:43,759 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:25:43,759 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-16T06:25:43,759 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-16T06:25:43,760 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-16T06:25:43,760 DEBUG [Time-limited test 
{}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-16T06:25:43,760 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-16T06:25:43,761 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-16T06:25:43,761 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-16T06:25:43,761 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-16T06:25:43,762 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-16T06:25:43,762 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-16T06:25:43,762 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-16T06:25:43,763 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:25:43,763 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:25:43,763 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:25:43,764 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-11-16T06:25:43,764 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:25:43,764 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:25:43,765 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:25:43,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741987_1163 (size=440656) 2024-11-16T06:25:43,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741987_1163 (size=440656) 2024-11-16T06:25:43,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741987_1163 (size=440656) 2024-11-16T06:25:43,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741988_1164 (size=131440) 2024-11-16T06:25:43,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741988_1164 (size=131440) 2024-11-16T06:25:43,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741988_1164 (size=131440) 2024-11-16T06:25:43,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741989_1165 (size=4188619) 2024-11-16T06:25:43,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741989_1165 (size=4188619) 2024-11-16T06:25:43,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741989_1165 (size=4188619) 2024-11-16T06:25:44,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741990_1166 (size=6424738) 2024-11-16T06:25:44,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741990_1166 (size=6424738) 2024-11-16T06:25:44,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741990_1166 (size=6424738) 2024-11-16T06:25:44,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741991_1167 (size=1323991) 2024-11-16T06:25:44,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741991_1167 (size=1323991) 2024-11-16T06:25:44,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:40907 is added to blk_1073741991_1167 (size=1323991) 2024-11-16T06:25:44,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741992_1168 (size=903732) 2024-11-16T06:25:44,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741992_1168 (size=903732) 2024-11-16T06:25:44,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741992_1168 (size=903732) 2024-11-16T06:25:44,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741993_1169 (size=8360083) 2024-11-16T06:25:44,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741993_1169 (size=8360083) 2024-11-16T06:25:44,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741993_1169 (size=8360083) 2024-11-16T06:25:44,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741994_1170 (size=1877034) 2024-11-16T06:25:44,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741994_1170 (size=1877034) 2024-11-16T06:25:44,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741994_1170 (size=1877034) 2024-11-16T06:25:44,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741995_1171 (size=77835) 2024-11-16T06:25:44,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741995_1171 (size=77835) 2024-11-16T06:25:44,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741995_1171 (size=77835) 2024-11-16T06:25:44,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741996_1172 (size=30949) 2024-11-16T06:25:44,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741996_1172 (size=30949) 2024-11-16T06:25:44,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741996_1172 (size=30949) 2024-11-16T06:25:44,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741997_1173 (size=1597327) 2024-11-16T06:25:44,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741997_1173 (size=1597327) 2024-11-16T06:25:44,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741997_1173 (size=1597327) 2024-11-16T06:25:44,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741998_1174 (size=4695811) 2024-11-16T06:25:44,329 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741998_1174 (size=4695811) 2024-11-16T06:25:44,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741998_1174 (size=4695811) 2024-11-16T06:25:44,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741999_1175 (size=232957) 2024-11-16T06:25:44,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741999_1175 (size=232957) 2024-11-16T06:25:44,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741999_1175 (size=232957) 2024-11-16T06:25:44,679 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0002_000001 (auth:SIMPLE) from 127.0.0.1:42586 2024-11-16T06:25:44,695 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_2/usercache/jenkins/appcache/application_1731738285782_0002/container_1731738285782_0002_01_000001/launch_container.sh] 2024-11-16T06:25:44,696 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_2/usercache/jenkins/appcache/application_1731738285782_0002/container_1731738285782_0002_01_000001/container_tokens] 2024-11-16T06:25:44,696 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_2/usercache/jenkins/appcache/application_1731738285782_0002/container_1731738285782_0002_01_000001/sysfs] 2024-11-16T06:25:44,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742000_1176 (size=127628) 2024-11-16T06:25:44,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742000_1176 (size=127628) 2024-11-16T06:25:44,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742000_1176 (size=127628) 2024-11-16T06:25:44,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742001_1177 (size=20406) 2024-11-16T06:25:44,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742001_1177 (size=20406) 2024-11-16T06:25:44,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742001_1177 (size=20406) 2024-11-16T06:25:44,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742002_1178 (size=5175431) 2024-11-16T06:25:44,835 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742002_1178 (size=5175431) 2024-11-16T06:25:44,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742002_1178 (size=5175431) 2024-11-16T06:25:44,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742003_1179 (size=217634) 2024-11-16T06:25:44,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742003_1179 (size=217634) 2024-11-16T06:25:44,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742003_1179 (size=217634) 2024-11-16T06:25:44,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742004_1180 (size=1832290) 2024-11-16T06:25:44,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742004_1180 (size=1832290) 2024-11-16T06:25:44,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742004_1180 (size=1832290) 2024-11-16T06:25:44,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742005_1181 (size=322274) 2024-11-16T06:25:44,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742005_1181 (size=322274) 2024-11-16T06:25:44,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742005_1181 (size=322274) 2024-11-16T06:25:44,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742006_1182 (size=503880) 2024-11-16T06:25:44,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742006_1182 (size=503880) 2024-11-16T06:25:44,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742006_1182 (size=503880) 2024-11-16T06:25:44,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742007_1183 (size=29229) 2024-11-16T06:25:44,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742007_1183 (size=29229) 2024-11-16T06:25:44,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742007_1183 (size=29229) 2024-11-16T06:25:44,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742008_1184 (size=24096) 2024-11-16T06:25:44,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742008_1184 (size=24096) 2024-11-16T06:25:44,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742008_1184 (size=24096) 2024-11-16T06:25:44,964 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742009_1185 (size=111872) 2024-11-16T06:25:44,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742009_1185 (size=111872) 2024-11-16T06:25:44,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742009_1185 (size=111872) 2024-11-16T06:25:44,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742010_1186 (size=45609) 2024-11-16T06:25:44,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742010_1186 (size=45609) 2024-11-16T06:25:44,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742010_1186 (size=45609) 2024-11-16T06:25:44,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742011_1187 (size=136454) 2024-11-16T06:25:44,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742011_1187 (size=136454) 2024-11-16T06:25:44,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742011_1187 (size=136454) 2024-11-16T06:25:44,981 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-16T06:25:44,984 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-11-16T06:25:44,987 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=33.4 K 2024-11-16T06:25:45,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742012_1188 (size=726) 2024-11-16T06:25:45,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742012_1188 (size=726) 2024-11-16T06:25:45,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742012_1188 (size=726) 2024-11-16T06:25:45,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742013_1189 (size=15) 2024-11-16T06:25:45,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742013_1189 (size=15) 2024-11-16T06:25:45,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742013_1189 (size=15) 2024-11-16T06:25:45,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742014_1190 (size=303736) 2024-11-16T06:25:45,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742014_1190 (size=303736) 2024-11-16T06:25:45,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to 
blk_1073742014_1190 (size=303736) 2024-11-16T06:25:45,058 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-16T06:25:45,058 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-16T06:25:45,439 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-16T06:25:45,525 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0003_000001 (auth:SIMPLE) from 127.0.0.1:39086 2024-11-16T06:25:48,275 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-16T06:25:48,275 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-16T06:25:48,276 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-16T06:25:48,276 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-16T06:25:50,757 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0003_000001 (auth:SIMPLE) from 127.0.0.1:50994 2024-11-16T06:25:51,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742015_1191 (size=349386) 2024-11-16T06:25:51,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742015_1191 (size=349386) 2024-11-16T06:25:51,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742015_1191 (size=349386) 2024-11-16T06:25:53,036 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0003_000001 (auth:SIMPLE) from 127.0.0.1:52306 2024-11-16T06:25:53,779 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-16T06:25:55,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742016_1192 (size=15407) 2024-11-16T06:25:55,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742016_1192 (size=15407) 2024-11-16T06:25:55,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742016_1192 (size=15407) 2024-11-16T06:25:55,836 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742017_1193 (size=8311) 2024-11-16T06:25:55,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742017_1193 (size=8311) 2024-11-16T06:25:55,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742017_1193 (size=8311) 2024-11-16T06:25:55,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742018_1194 (size=5504) 2024-11-16T06:25:55,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742018_1194 (size=5504) 2024-11-16T06:25:55,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742018_1194 (size=5504) 2024-11-16T06:25:55,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742019_1195 (size=4964) 2024-11-16T06:25:55,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742019_1195 (size=4964) 2024-11-16T06:25:55,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742019_1195 (size=4964) 2024-11-16T06:25:55,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742020_1196 (size=17462) 2024-11-16T06:25:55,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742020_1196 (size=17462) 2024-11-16T06:25:55,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742020_1196 (size=17462) 2024-11-16T06:25:55,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742021_1197 (size=465) 2024-11-16T06:25:55,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742021_1197 (size=465) 2024-11-16T06:25:55,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742021_1197 (size=465) 2024-11-16T06:25:56,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742022_1198 (size=17462) 2024-11-16T06:25:56,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742022_1198 (size=17462) 2024-11-16T06:25:56,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742022_1198 (size=17462) 2024-11-16T06:25:56,034 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_2/usercache/jenkins/appcache/application_1731738285782_0003/container_1731738285782_0003_01_000002/launch_container.sh] 
2024-11-16T06:25:56,034 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_2/usercache/jenkins/appcache/application_1731738285782_0003/container_1731738285782_0003_01_000002/container_tokens] 2024-11-16T06:25:56,034 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_2/usercache/jenkins/appcache/application_1731738285782_0003/container_1731738285782_0003_01_000002/sysfs] 2024-11-16T06:25:56,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742023_1199 (size=349386) 2024-11-16T06:25:56,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742023_1199 (size=349386) 2024-11-16T06:25:56,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742023_1199 (size=349386) 2024-11-16T06:25:56,054 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0003_000001 (auth:SIMPLE) from 127.0.0.1:52320 2024-11-16T06:25:57,205 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-16T06:25:57,208 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
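The ExportSnapshot entries around this point (loading the hfile list, computing the export split, running the MapReduce copy, then finalizing and verifying) are driven by the ExportSnapshot tool. As an illustrative sketch only, and not the invocation used by this test, the same kind of export can be run programmatically via ToolRunner with the standard -snapshot/-copy-to arguments; the destination URI below is a placeholder, not the path from the log:

// Hedged sketch: run the ExportSnapshot tool with ToolRunner.
// The -copy-to destination is a hypothetical placeholder.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies the snapshot manifest and referenced hfiles to the target
    // filesystem using a MapReduce job, as the log above records.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemState",
        "-copy-to", "hdfs://namenode:8020/hbase-exports/export-1",
        "-mappers", "1"
    });
    System.exit(rc);
  }
}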
2024-11-16T06:25:57,231 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportFileSystemState 2024-11-16T06:25:57,231 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-16T06:25:57,232 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-16T06:25:57,232 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2111949037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-16T06:25:57,233 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-16T06:25:57,233 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-16T06:25:57,233 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2111949037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738342436/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738342436/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-16T06:25:57,233 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738342436/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-16T06:25:57,233 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738342436/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-16T06:25:57,243 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-11-16T06:25:57,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=69, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState 2024-11-16T06:25:57,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=69 2024-11-16T06:25:57,249 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738357248"}]},"ts":"1731738357248"} 2024-11-16T06:25:57,252 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-11-16T06:25:57,252 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-11-16T06:25:57,253 INFO [PEWorker-4 
{}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-11-16T06:25:57,255 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9f7cf21eab98410c35305664c5b8052c, UNASSIGN}, {pid=72, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=04992db8c3a017304a8eea9e516323bf, UNASSIGN}] 2024-11-16T06:25:57,256 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=04992db8c3a017304a8eea9e516323bf, UNASSIGN 2024-11-16T06:25:57,256 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9f7cf21eab98410c35305664c5b8052c, UNASSIGN 2024-11-16T06:25:57,257 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=71 updating hbase:meta row=9f7cf21eab98410c35305664c5b8052c, regionState=CLOSING, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:25:57,257 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=72 updating hbase:meta row=04992db8c3a017304a8eea9e516323bf, regionState=CLOSING, regionLocation=3456ee6a3164,40155,1731738278871 2024-11-16T06:25:57,259 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9f7cf21eab98410c35305664c5b8052c, UNASSIGN because future has completed 2024-11-16T06:25:57,259 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-16T06:25:57,259 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=73, ppid=71, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9f7cf21eab98410c35305664c5b8052c, server=3456ee6a3164,35939,1731738278639}] 2024-11-16T06:25:57,260 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=72, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=04992db8c3a017304a8eea9e516323bf, UNASSIGN because future has completed 2024-11-16T06:25:57,261 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-16T06:25:57,261 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=74, ppid=72, state=RUNNABLE, hasLock=false; CloseRegionProcedure 04992db8c3a017304a8eea9e516323bf, server=3456ee6a3164,40155,1731738278871}] 2024-11-16T06:25:57,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=69 2024-11-16T06:25:57,412 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 
{event_type=M_RS_CLOSE_REGION, pid=73}] handler.UnassignRegionHandler(122): Close 9f7cf21eab98410c35305664c5b8052c 2024-11-16T06:25:57,413 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-16T06:25:57,413 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1722): Closing 9f7cf21eab98410c35305664c5b8052c, disabling compactions & flushes 2024-11-16T06:25:57,413 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c. 2024-11-16T06:25:57,413 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c. 2024-11-16T06:25:57,413 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c. after waiting 0 ms 2024-11-16T06:25:57,413 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c. 2024-11-16T06:25:57,413 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] handler.UnassignRegionHandler(122): Close 04992db8c3a017304a8eea9e516323bf 2024-11-16T06:25:57,413 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-16T06:25:57,414 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1722): Closing 04992db8c3a017304a8eea9e516323bf, disabling compactions & flushes 2024-11-16T06:25:57,414 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf. 2024-11-16T06:25:57,414 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf. 2024-11-16T06:25:57,414 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf. after waiting 0 ms 2024-11-16T06:25:57,414 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf. 
2024-11-16T06:25:57,421 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/9f7cf21eab98410c35305664c5b8052c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T06:25:57,422 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/04992db8c3a017304a8eea9e516323bf/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T06:25:57,422 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:25:57,422 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:25:57,422 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf. 2024-11-16T06:25:57,422 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c. 2024-11-16T06:25:57,422 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1676): Region close journal for 9f7cf21eab98410c35305664c5b8052c: Waiting for close lock at 1731738357413Running coprocessor pre-close hooks at 1731738357413Disabling compacts and flushes for region at 1731738357413Disabling writes for close at 1731738357413Writing region close event to WAL at 1731738357414 (+1 ms)Running coprocessor post-close hooks at 1731738357422 (+8 ms)Closed at 1731738357422 2024-11-16T06:25:57,422 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1676): Region close journal for 04992db8c3a017304a8eea9e516323bf: Waiting for close lock at 1731738357413Running coprocessor pre-close hooks at 1731738357413Disabling compacts and flushes for region at 1731738357414 (+1 ms)Disabling writes for close at 1731738357414Writing region close event to WAL at 1731738357414Running coprocessor post-close hooks at 1731738357422 (+8 ms)Closed at 1731738357422 2024-11-16T06:25:57,424 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] handler.UnassignRegionHandler(157): Closed 04992db8c3a017304a8eea9e516323bf 2024-11-16T06:25:57,424 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=72 updating hbase:meta row=04992db8c3a017304a8eea9e516323bf, regionState=CLOSED 2024-11-16T06:25:57,424 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] handler.UnassignRegionHandler(157): Closed 9f7cf21eab98410c35305664c5b8052c 2024-11-16T06:25:57,425 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=71 updating hbase:meta row=9f7cf21eab98410c35305664c5b8052c, regionState=CLOSED 2024-11-16T06:25:57,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): 
Going to wake up procedure pid=74, ppid=72, state=RUNNABLE, hasLock=false; CloseRegionProcedure 04992db8c3a017304a8eea9e516323bf, server=3456ee6a3164,40155,1731738278871 because future has completed 2024-11-16T06:25:57,427 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=73, ppid=71, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9f7cf21eab98410c35305664c5b8052c, server=3456ee6a3164,35939,1731738278639 because future has completed 2024-11-16T06:25:57,429 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=74, resume processing ppid=72 2024-11-16T06:25:57,429 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=74, ppid=72, state=SUCCESS, hasLock=false; CloseRegionProcedure 04992db8c3a017304a8eea9e516323bf, server=3456ee6a3164,40155,1731738278871 in 166 msec 2024-11-16T06:25:57,430 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=70, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=04992db8c3a017304a8eea9e516323bf, UNASSIGN in 174 msec 2024-11-16T06:25:57,430 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=73, resume processing ppid=71 2024-11-16T06:25:57,430 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=71, state=SUCCESS, hasLock=false; CloseRegionProcedure 9f7cf21eab98410c35305664c5b8052c, server=3456ee6a3164,35939,1731738278639 in 169 msec 2024-11-16T06:25:57,432 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=71, resume processing ppid=70 2024-11-16T06:25:57,432 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, ppid=70, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9f7cf21eab98410c35305664c5b8052c, UNASSIGN in 175 msec 2024-11-16T06:25:57,434 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=70, resume processing ppid=69 2024-11-16T06:25:57,434 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=70, ppid=69, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 179 msec 2024-11-16T06:25:57,435 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738357435"}]},"ts":"1731738357435"} 2024-11-16T06:25:57,437 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-11-16T06:25:57,437 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-11-16T06:25:57,439 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 193 msec 2024-11-16T06:25:57,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=69 2024-11-16T06:25:57,566 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-16T06:25:57,566 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$5(2570): 
Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-11-16T06:25:57,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=75, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-16T06:25:57,568 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=75, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-16T06:25:57,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-11-16T06:25:57,569 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=75, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-16T06:25:57,572 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-11-16T06:25:57,574 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/9f7cf21eab98410c35305664c5b8052c 2024-11-16T06:25:57,576 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/9f7cf21eab98410c35305664c5b8052c/cf, FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/9f7cf21eab98410c35305664c5b8052c/recovered.edits] 2024-11-16T06:25:57,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-16T06:25:57,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-16T06:25:57,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-16T06:25:57,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-16T06:25:57,581 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/9f7cf21eab98410c35305664c5b8052c/cf/3957e206f01b4610bc0a051608cc9f12 to 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportFileSystemState/9f7cf21eab98410c35305664c5b8052c/cf/3957e206f01b4610bc0a051608cc9f12 2024-11-16T06:25:57,583 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-16T06:25:57,583 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-16T06:25:57,583 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-16T06:25:57,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-16T06:25:57,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-16T06:25:57,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:57,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-16T06:25:57,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:57,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:57,584 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data null 2024-11-16T06:25:57,584 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-16T06:25:57,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:57,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=75 2024-11-16T06:25:57,585 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/9f7cf21eab98410c35305664c5b8052c/recovered.edits/9.seqid to 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportFileSystemState/9f7cf21eab98410c35305664c5b8052c/recovered.edits/9.seqid 2024-11-16T06:25:57,586 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/9f7cf21eab98410c35305664c5b8052c 2024-11-16T06:25:57,591 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/04992db8c3a017304a8eea9e516323bf 2024-11-16T06:25:57,594 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/04992db8c3a017304a8eea9e516323bf/cf, FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/04992db8c3a017304a8eea9e516323bf/recovered.edits] 2024-11-16T06:25:57,598 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/04992db8c3a017304a8eea9e516323bf/cf/9d4234f4c06846a989fb5e4ed6ee692f to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportFileSystemState/04992db8c3a017304a8eea9e516323bf/cf/9d4234f4c06846a989fb5e4ed6ee692f 2024-11-16T06:25:57,602 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/04992db8c3a017304a8eea9e516323bf/recovered.edits/9.seqid to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportFileSystemState/04992db8c3a017304a8eea9e516323bf/recovered.edits/9.seqid 2024-11-16T06:25:57,602 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemState/04992db8c3a017304a8eea9e516323bf 2024-11-16T06:25:57,602 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-11-16T06:25:57,603 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-11-16T06:25:57,604 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf] 2024-11-16T06:25:57,607 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b2024111655f493897cc34b75978cb744e59cffd7_04992db8c3a017304a8eea9e516323bf to 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b2024111655f493897cc34b75978cb744e59cffd7_04992db8c3a017304a8eea9e516323bf 2024-11-16T06:25:57,609 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e202411162e66d6b8bebd49158edf453b0093e35a_9f7cf21eab98410c35305664c5b8052c to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e202411162e66d6b8bebd49158edf453b0093e35a_9f7cf21eab98410c35305664c5b8052c 2024-11-16T06:25:57,609 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-11-16T06:25:57,611 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=75, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-16T06:25:57,615 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-11-16T06:25:57,618 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemState' descriptor. 2024-11-16T06:25:57,619 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=75, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-16T06:25:57,619 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemState' from region states. 2024-11-16T06:25:57,619 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731738357619"}]},"ts":"9223372036854775807"} 2024-11-16T06:25:57,619 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731738357619"}]},"ts":"9223372036854775807"} 2024-11-16T06:25:57,622 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-16T06:25:57,622 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 9f7cf21eab98410c35305664c5b8052c, NAME => 'testtb-testExportFileSystemState,,1731738340738.9f7cf21eab98410c35305664c5b8052c.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 04992db8c3a017304a8eea9e516323bf, NAME => 'testtb-testExportFileSystemState,1,1731738340738.04992db8c3a017304a8eea9e516323bf.', STARTKEY => '1', ENDKEY => ''}] 2024-11-16T06:25:57,622 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemState' as deleted. 
2024-11-16T06:25:57,623 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731738357622"}]},"ts":"9223372036854775807"} 2024-11-16T06:25:57,625 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-11-16T06:25:57,626 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=75, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-16T06:25:57,627 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 60 msec 2024-11-16T06:25:57,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=75 2024-11-16T06:25:57,695 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-11-16T06:25:57,695 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-16T06:25:57,704 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-11-16T06:25:57,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-11-16T06:25:57,709 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-11-16T06:25:57,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-11-16T06:25:57,737 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=786 (was 783) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2111949037_22 at /127.0.0.1:40332 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2111949037_22 at /127.0.0.1:46442 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (692179358) connection to localhost/127.0.0.1:45921 from 
jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2111949037_22 at /127.0.0.1:48104 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45921 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2932 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36617 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 117026) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1319957474_1 at /127.0.0.1:40302 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=806 (was 819), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=699 (was 679) - SystemLoadAverage LEAK? -, ProcessCount=19 (was 19), AvailableMemoryMB=624 (was 544) - AvailableMemoryMB LEAK? 
- 2024-11-16T06:25:57,737 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=786 is superior to 500 2024-11-16T06:25:57,755 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=786, OpenFileDescriptor=806, MaxFileDescriptor=1048576, SystemLoadAverage=699, ProcessCount=19, AvailableMemoryMB=623 2024-11-16T06:25:57,755 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=786 is superior to 500 2024-11-16T06:25:57,757 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T06:25:57,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=76, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-11-16T06:25:57,760 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T06:25:57,761 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 76 2024-11-16T06:25:57,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-16T06:25:57,762 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T06:25:57,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742024_1200 (size=440) 2024-11-16T06:25:57,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742024_1200 (size=440) 2024-11-16T06:25:57,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742024_1200 (size=440) 2024-11-16T06:25:57,776 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 430970423552b1d6601a0fc13165c26b, NAME => 'testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:25:57,776 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 790d0ea9a8687ec3cb897ef67a695db1, NAME => 'testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:25:57,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742026_1202 (size=65) 2024-11-16T06:25:57,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742026_1202 (size=65) 2024-11-16T06:25:57,786 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:25:57,786 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing 790d0ea9a8687ec3cb897ef67a695db1, disabling compactions & flushes 2024-11-16T06:25:57,786 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1. 2024-11-16T06:25:57,786 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1. 2024-11-16T06:25:57,786 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1. after waiting 0 ms 2024-11-16T06:25:57,786 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1. 2024-11-16T06:25:57,786 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1. 
2024-11-16T06:25:57,786 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for 790d0ea9a8687ec3cb897ef67a695db1: Waiting for close lock at 1731738357786Disabling compacts and flushes for region at 1731738357786Disabling writes for close at 1731738357786Writing region close event to WAL at 1731738357786Closed at 1731738357786 2024-11-16T06:25:57,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742025_1201 (size=65) 2024-11-16T06:25:57,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742026_1202 (size=65) 2024-11-16T06:25:57,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742025_1201 (size=65) 2024-11-16T06:25:57,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742025_1201 (size=65) 2024-11-16T06:25:57,788 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:25:57,788 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing 430970423552b1d6601a0fc13165c26b, disabling compactions & flushes 2024-11-16T06:25:57,788 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b. 2024-11-16T06:25:57,788 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b. 2024-11-16T06:25:57,788 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b. after waiting 0 ms 2024-11-16T06:25:57,788 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b. 2024-11-16T06:25:57,788 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b. 
2024-11-16T06:25:57,788 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for 430970423552b1d6601a0fc13165c26b: Waiting for close lock at 1731738357788Disabling compacts and flushes for region at 1731738357788Disabling writes for close at 1731738357788Writing region close event to WAL at 1731738357788Closed at 1731738357788 2024-11-16T06:25:57,790 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T06:25:57,790 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731738357790"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731738357790"}]},"ts":"1731738357790"} 2024-11-16T06:25:57,790 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731738357790"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731738357790"}]},"ts":"1731738357790"} 2024-11-16T06:25:57,792 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-16T06:25:57,794 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T06:25:57,794 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738357794"}]},"ts":"1731738357794"} 2024-11-16T06:25:57,796 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-11-16T06:25:57,796 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {3456ee6a3164=0} racks are {/default-rack=0} 2024-11-16T06:25:57,798 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-16T06:25:57,798 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-16T06:25:57,798 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-16T06:25:57,798 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-16T06:25:57,798 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-16T06:25:57,798 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-16T06:25:57,798 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-16T06:25:57,798 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-16T06:25:57,798 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-16T06:25:57,798 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-16T06:25:57,798 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=430970423552b1d6601a0fc13165c26b, ASSIGN}, {pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=790d0ea9a8687ec3cb897ef67a695db1, ASSIGN}] 2024-11-16T06:25:57,799 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=790d0ea9a8687ec3cb897ef67a695db1, ASSIGN 2024-11-16T06:25:57,800 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=430970423552b1d6601a0fc13165c26b, ASSIGN 2024-11-16T06:25:57,807 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=790d0ea9a8687ec3cb897ef67a695db1, ASSIGN; state=OFFLINE, location=3456ee6a3164,37533,1731738278939; forceNewPlan=false, retain=false 2024-11-16T06:25:57,808 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=430970423552b1d6601a0fc13165c26b, ASSIGN; state=OFFLINE, location=3456ee6a3164,35939,1731738278639; forceNewPlan=false, retain=false 2024-11-16T06:25:57,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-16T06:25:57,958 INFO [3456ee6a3164:39381 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-16T06:25:57,959 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=78 updating hbase:meta row=790d0ea9a8687ec3cb897ef67a695db1, regionState=OPENING, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:25:57,960 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=77 updating hbase:meta row=430970423552b1d6601a0fc13165c26b, regionState=OPENING, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:25:57,963 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=790d0ea9a8687ec3cb897ef67a695db1, ASSIGN because future has completed 2024-11-16T06:25:57,964 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=79, ppid=78, state=RUNNABLE, hasLock=false; OpenRegionProcedure 790d0ea9a8687ec3cb897ef67a695db1, server=3456ee6a3164,37533,1731738278939}] 2024-11-16T06:25:57,965 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=430970423552b1d6601a0fc13165c26b, ASSIGN because future has completed 2024-11-16T06:25:57,969 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=77, state=RUNNABLE, hasLock=false; OpenRegionProcedure 430970423552b1d6601a0fc13165c26b, server=3456ee6a3164,35939,1731738278639}] 2024-11-16T06:25:58,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-16T06:25:58,126 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b. 2024-11-16T06:25:58,126 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1. 2024-11-16T06:25:58,126 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(7752): Opening region: {ENCODED => 790d0ea9a8687ec3cb897ef67a695db1, NAME => 'testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1.', STARTKEY => '1', ENDKEY => ''} 2024-11-16T06:25:58,126 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7752): Opening region: {ENCODED => 430970423552b1d6601a0fc13165c26b, NAME => 'testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b.', STARTKEY => '', ENDKEY => '1'} 2024-11-16T06:25:58,126 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1. service=AccessControlService 2024-11-16T06:25:58,126 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
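The 'Registered coprocessor service ... service=AccessControlService' and 'System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded' entries during region open indicate the cluster was started with HBase authorization enabled. A rough sketch of the standard settings, expressed against a Hadoop Configuration; the keys are the documented ones, while how the test utility actually wires them into the mini-cluster is an assumption, not shown in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Illustrative helper class; name invented for the sketch.
    public class SecureClusterConfSketch {
      public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.security.authorization", true);
        conf.set("hbase.coprocessor.master.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.region.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.regionserver.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        // With these set, the master maintains the hbase:acl table and the /hbase/acl
        // znode that the ZKPermissionWatcher entries later in this log refer to.
        return conf;
      }
    }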
2024-11-16T06:25:58,126 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b. service=AccessControlService 2024-11-16T06:25:58,127 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-16T06:25:58,127 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 790d0ea9a8687ec3cb897ef67a695db1 2024-11-16T06:25:58,127 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 430970423552b1d6601a0fc13165c26b 2024-11-16T06:25:58,127 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:25:58,127 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:25:58,127 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(7794): checking encryption for 790d0ea9a8687ec3cb897ef67a695db1 2024-11-16T06:25:58,127 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7794): checking encryption for 430970423552b1d6601a0fc13165c26b 2024-11-16T06:25:58,127 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7797): checking classloading for 430970423552b1d6601a0fc13165c26b 2024-11-16T06:25:58,127 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(7797): checking classloading for 790d0ea9a8687ec3cb897ef67a695db1 2024-11-16T06:25:58,129 INFO [StoreOpener-790d0ea9a8687ec3cb897ef67a695db1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 790d0ea9a8687ec3cb897ef67a695db1 2024-11-16T06:25:58,129 INFO [StoreOpener-430970423552b1d6601a0fc13165c26b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 430970423552b1d6601a0fc13165c26b 2024-11-16T06:25:58,131 INFO [StoreOpener-430970423552b1d6601a0fc13165c26b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 430970423552b1d6601a0fc13165c26b columnFamilyName cf 2024-11-16T06:25:58,131 INFO [StoreOpener-790d0ea9a8687ec3cb897ef67a695db1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 790d0ea9a8687ec3cb897ef67a695db1 columnFamilyName cf 2024-11-16T06:25:58,133 DEBUG [StoreOpener-430970423552b1d6601a0fc13165c26b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:25:58,133 DEBUG [StoreOpener-790d0ea9a8687ec3cb897ef67a695db1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:25:58,133 INFO [StoreOpener-790d0ea9a8687ec3cb897ef67a695db1-1 {}] regionserver.HStore(327): Store=790d0ea9a8687ec3cb897ef67a695db1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:25:58,133 INFO [StoreOpener-430970423552b1d6601a0fc13165c26b-1 {}] regionserver.HStore(327): Store=430970423552b1d6601a0fc13165c26b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:25:58,134 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1038): replaying wal for 790d0ea9a8687ec3cb897ef67a695db1 2024-11-16T06:25:58,134 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1038): replaying wal for 430970423552b1d6601a0fc13165c26b 2024-11-16T06:25:58,134 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/790d0ea9a8687ec3cb897ef67a695db1 2024-11-16T06:25:58,134 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/430970423552b1d6601a0fc13165c26b 2024-11-16T06:25:58,135 DEBUG 
[RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/790d0ea9a8687ec3cb897ef67a695db1 2024-11-16T06:25:58,135 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/430970423552b1d6601a0fc13165c26b 2024-11-16T06:25:58,135 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1048): stopping wal replay for 430970423552b1d6601a0fc13165c26b 2024-11-16T06:25:58,135 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1048): stopping wal replay for 790d0ea9a8687ec3cb897ef67a695db1 2024-11-16T06:25:58,135 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1060): Cleaning up temporary data for 430970423552b1d6601a0fc13165c26b 2024-11-16T06:25:58,135 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1060): Cleaning up temporary data for 790d0ea9a8687ec3cb897ef67a695db1 2024-11-16T06:25:58,136 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1093): writing seq id for 430970423552b1d6601a0fc13165c26b 2024-11-16T06:25:58,137 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1093): writing seq id for 790d0ea9a8687ec3cb897ef67a695db1 2024-11-16T06:25:58,139 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/430970423552b1d6601a0fc13165c26b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:25:58,139 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1114): Opened 430970423552b1d6601a0fc13165c26b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73008641, jitterRate=0.08791352808475494}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:25:58,139 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 430970423552b1d6601a0fc13165c26b 2024-11-16T06:25:58,140 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1006): Region open journal for 430970423552b1d6601a0fc13165c26b: Running coprocessor pre-open hook at 1731738358127Writing region info on filesystem at 1731738358127Initializing all the Stores at 1731738358128 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738358128Cleaning up temporary data from old regions at 1731738358135 (+7 ms)Running coprocessor post-open hooks at 1731738358139 (+4 ms)Region opened successfully at 1731738358140 (+1 ms) 2024-11-16T06:25:58,141 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b., pid=80, masterSystemTime=1731738358122 2024-11-16T06:25:58,146 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b. 2024-11-16T06:25:58,147 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/790d0ea9a8687ec3cb897ef67a695db1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:25:58,147 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b. 2024-11-16T06:25:58,147 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1114): Opened 790d0ea9a8687ec3cb897ef67a695db1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67806867, jitterRate=0.010401055216789246}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:25:58,147 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 790d0ea9a8687ec3cb897ef67a695db1 2024-11-16T06:25:58,147 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1006): Region open journal for 790d0ea9a8687ec3cb897ef67a695db1: Running coprocessor pre-open hook at 1731738358127Writing region info on filesystem at 1731738358127Initializing all the Stores at 1731738358128 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738358128Cleaning up temporary data from old regions at 1731738358135 (+7 ms)Running coprocessor post-open hooks at 1731738358147 (+12 ms)Region opened successfully at 1731738358147 2024-11-16T06:25:58,148 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=77 updating hbase:meta row=430970423552b1d6601a0fc13165c26b, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:25:58,148 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1., pid=79, masterSystemTime=1731738358122 2024-11-16T06:25:58,150 DEBUG 
[RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1. 2024-11-16T06:25:58,150 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1. 2024-11-16T06:25:58,150 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=78 updating hbase:meta row=790d0ea9a8687ec3cb897ef67a695db1, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:25:58,153 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=80, ppid=77, state=RUNNABLE, hasLock=false; OpenRegionProcedure 430970423552b1d6601a0fc13165c26b, server=3456ee6a3164,35939,1731738278639 because future has completed 2024-11-16T06:25:58,154 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=79, ppid=78, state=RUNNABLE, hasLock=false; OpenRegionProcedure 790d0ea9a8687ec3cb897ef67a695db1, server=3456ee6a3164,37533,1731738278939 because future has completed 2024-11-16T06:25:58,157 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=80, resume processing ppid=77 2024-11-16T06:25:58,157 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=77, state=SUCCESS, hasLock=false; OpenRegionProcedure 430970423552b1d6601a0fc13165c26b, server=3456ee6a3164,35939,1731738278639 in 185 msec 2024-11-16T06:25:58,157 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=79, resume processing ppid=78 2024-11-16T06:25:58,157 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, ppid=78, state=SUCCESS, hasLock=false; OpenRegionProcedure 790d0ea9a8687ec3cb897ef67a695db1, server=3456ee6a3164,37533,1731738278939 in 191 msec 2024-11-16T06:25:58,158 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=76, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=430970423552b1d6601a0fc13165c26b, ASSIGN in 359 msec 2024-11-16T06:25:58,160 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=78, resume processing ppid=76 2024-11-16T06:25:58,160 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=76, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=790d0ea9a8687ec3cb897ef67a695db1, ASSIGN in 359 msec 2024-11-16T06:25:58,160 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T06:25:58,161 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738358160"}]},"ts":"1731738358160"} 2024-11-16T06:25:58,162 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-11-16T06:25:58,163 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; 
CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T06:25:58,164 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-11-16T06:25:58,168 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-16T06:25:58,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:58,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:58,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:58,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:25:58,173 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-16T06:25:58,173 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-16T06:25:58,173 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-16T06:25:58,173 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-16T06:25:58,174 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 415 msec 2024-11-16T06:25:58,275 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-16T06:25:58,275 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-11-16T06:25:58,276 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-16T06:25:58,386 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-16T06:25:58,387 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-16T06:25:58,387 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-16T06:25:58,393 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-11-16T06:25:58,393 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b. 2024-11-16T06:25:58,394 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T06:25:58,396 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-16T06:25:58,403 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-16T06:25:58,410 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-16T06:25:58,412 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-16T06:25:58,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731738358412 (current time:1731738358412). 
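The snapshot request logged just above ({ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }) is the shape produced by the client Admin API; the 'Checking to see if procedure is done pid=81' entries further down are the completion polls for the same call. A minimal sketch, again assuming an open Connection, with invented helper names:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.SnapshotType;

    // Illustrative helper class; names invented for the sketch.
    public class TakeSnapshotSketch {
      public static void takeEmptySnapshot(Connection conn) throws Exception {
        try (Admin admin = conn.getAdmin()) {
          // type=FLUSH in the logged request corresponds to SnapshotType.FLUSH
          admin.snapshot("emptySnaptb0-testConsecutiveExports",
              TableName.valueOf("testtb-testConsecutiveExports"),
              SnapshotType.FLUSH);  // returns once the SnapshotProcedure finishes
        }
      }
    }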
2024-11-16T06:25:58,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-16T06:25:58,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-16T06:25:58,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-16T06:25:58,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f6d7513, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:58,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:25:58,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:25:58,414 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:25:58,415 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:25:58,415 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:25:58,415 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c84d531, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:58,415 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:25:58,415 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:25:58,415 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:58,416 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56142, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:25:58,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ec1dff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:58,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:25:58,418 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:25:58,418 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:25:58,419 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39914, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:25:58,420 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 2024-11-16T06:25:58,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:25:58,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:58,420 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
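The short-lived connection opened and closed here (its close site logged with a DEBUG call stack) is part of the master validating the snapshot request: SnapshotDescriptionUtils.isSecurityAvailable checks that the hbase:acl table exists, and the similar stack that follows (writeAclToSnapshotDescription / PermissionStorage.getPermissions) reads the table's ACLs, the 'jenkins: RWXCA' entry, so they can be embedded in the snapshot description. Roughly the same existence check as a client-side sketch; it assumes PermissionStorage.ACL_TABLE_NAME is the constant naming 'hbase:acl', and the helper names are invented:

    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.PermissionStorage;

    // Illustrative helper class; assumes ACL_TABLE_NAME resolves to 'hbase:acl'.
    public class AclTableCheckSketch {
      public static boolean aclTablePresent(Connection conn) throws Exception {
        try (Admin admin = conn.getAdmin()) {
          return admin.tableExists(PermissionStorage.ACL_TABLE_NAME);
        }
      }
    }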
2024-11-16T06:25:58,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:58,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78ad0e5f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:58,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:25:58,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:25:58,422 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:25:58,422 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:25:58,422 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:25:58,423 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d4ce6d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:58,423 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:25:58,423 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:25:58,423 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:58,424 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56160, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:25:58,425 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6683ca83, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:58,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:25:58,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:25:58,427 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:25:58,428 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39916, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:25:58,429 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338., hostname=3456ee6a3164,35939,1731738278639, seqNum=2] 2024-11-16T06:25:58,430 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:25:58,431 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48014, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:25:58,432 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 2024-11-16T06:25:58,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:25:58,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:58,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:58,433 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:25:58,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-16T06:25:58,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-16T06:25:58,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=81, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-16T06:25:58,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 81 2024-11-16T06:25:58,436 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-16T06:25:58,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 2024-11-16T06:25:58,437 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-16T06:25:58,440 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-16T06:25:58,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742027_1203 (size=161) 2024-11-16T06:25:58,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742027_1203 (size=161) 2024-11-16T06:25:58,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742027_1203 (size=161) 2024-11-16T06:25:58,449 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ 
ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-16T06:25:58,449 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 430970423552b1d6601a0fc13165c26b}, {pid=83, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 790d0ea9a8687ec3cb897ef67a695db1}] 2024-11-16T06:25:58,450 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=82, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 430970423552b1d6601a0fc13165c26b 2024-11-16T06:25:58,450 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=83, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 790d0ea9a8687ec3cb897ef67a695db1 2024-11-16T06:25:58,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 2024-11-16T06:25:58,602 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=83 2024-11-16T06:25:58,602 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=82 2024-11-16T06:25:58,603 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b. 2024-11-16T06:25:58,603 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1. 2024-11-16T06:25:58,603 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.HRegion(2603): Flush status journal for 430970423552b1d6601a0fc13165c26b: 2024-11-16T06:25:58,603 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.HRegion(2603): Flush status journal for 790d0ea9a8687ec3cb897ef67a695db1: 2024-11-16T06:25:58,603 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b. for emptySnaptb0-testConsecutiveExports completed. 2024-11-16T06:25:58,603 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1. for emptySnaptb0-testConsecutiveExports completed. 2024-11-16T06:25:58,603 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-16T06:25:58,603 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-16T06:25:58,603 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:25:58,603 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:25:58,603 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-16T06:25:58,603 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-16T06:25:58,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742028_1204 (size=68) 2024-11-16T06:25:58,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742028_1204 (size=68) 2024-11-16T06:25:58,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742028_1204 (size=68) 2024-11-16T06:25:58,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b. 
2024-11-16T06:25:58,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-16T06:25:58,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=82 2024-11-16T06:25:58,613 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 430970423552b1d6601a0fc13165c26b 2024-11-16T06:25:58,613 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=82, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 430970423552b1d6601a0fc13165c26b 2024-11-16T06:25:58,615 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=82, ppid=81, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 430970423552b1d6601a0fc13165c26b in 165 msec 2024-11-16T06:25:58,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742029_1205 (size=68) 2024-11-16T06:25:58,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742029_1205 (size=68) 2024-11-16T06:25:58,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742029_1205 (size=68) 2024-11-16T06:25:58,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1. 
2024-11-16T06:25:58,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=83 2024-11-16T06:25:58,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=83 2024-11-16T06:25:58,623 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 790d0ea9a8687ec3cb897ef67a695db1 2024-11-16T06:25:58,623 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=83, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 790d0ea9a8687ec3cb897ef67a695db1 2024-11-16T06:25:58,626 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=83, resume processing ppid=81 2024-11-16T06:25:58,626 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-16T06:25:58,626 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=81, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 790d0ea9a8687ec3cb897ef67a695db1 in 175 msec 2024-11-16T06:25:58,627 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-16T06:25:58,628 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-16T06:25:58,628 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-16T06:25:58,628 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:25:58,628 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-16T06:25:58,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742030_1206 (size=60) 2024-11-16T06:25:58,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742030_1206 (size=60) 2024-11-16T06:25:58,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742030_1206 (size=60) 2024-11-16T06:25:58,638 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-16T06:25:58,638 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-11-16T06:25:58,639 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-11-16T06:25:58,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742031_1207 (size=641) 2024-11-16T06:25:58,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742031_1207 (size=641) 2024-11-16T06:25:58,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742031_1207 (size=641) 2024-11-16T06:25:58,649 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-16T06:25:58,655 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-16T06:25:58,655 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-11-16T06:25:58,657 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-16T06:25:58,657 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 81 2024-11-16T06:25:58,658 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 223 msec 2024-11-16T06:25:58,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 2024-11-16T06:25:58,756 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-16T06:25:58,772 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35939 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b. with WAL disabled. Data may be lost in the event of a crash. 2024-11-16T06:25:58,774 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37533 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1. with WAL disabled. Data may be lost in the event of a crash. 2024-11-16T06:25:58,775 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-16T06:25:58,778 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-11-16T06:25:58,778 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b. 
2024-11-16T06:25:58,778 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T06:25:58,780 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-16T06:25:58,784 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-16T06:25:58,790 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-16T06:25:58,793 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-16T06:25:58,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731738358793 (current time:1731738358793). 2024-11-16T06:25:58,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-16T06:25:58,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-16T06:25:58,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-16T06:25:58,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fa13a9a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:58,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:25:58,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:25:58,794 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:25:58,795 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:25:58,795 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:25:58,795 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b39eb84, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-11-16T06:25:58,795 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:25:58,795 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:25:58,795 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:58,796 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56186, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:25:58,796 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53efac98, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:58,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:25:58,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:25:58,798 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:25:58,798 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39920, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:25:58,800 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 
2024-11-16T06:25:58,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:25:58,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:58,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:58,800 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:25:58,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b436dc4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:58,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:25:58,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:25:58,801 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:25:58,801 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:25:58,801 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:25:58,801 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@374e6d0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:58,801 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:25:58,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:25:58,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:58,802 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56220, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:25:58,803 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d97bfaa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:25:58,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:25:58,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:25:58,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:25:58,805 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39930, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:25:58,807 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338., hostname=3456ee6a3164,35939,1731738278639, seqNum=2] 2024-11-16T06:25:58,808 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:25:58,808 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48020, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:25:58,810 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 
2024-11-16T06:25:58,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:25:58,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:58,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:25:58,810 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:25:58,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-16T06:25:58,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-16T06:25:58,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=84, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-16T06:25:58,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 84 2024-11-16T06:25:58,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-16T06:25:58,814 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-16T06:25:58,815 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-16T06:25:58,818 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-16T06:25:58,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742032_1208 (size=156) 2024-11-16T06:25:58,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742032_1208 (size=156) 2024-11-16T06:25:58,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742032_1208 (size=156) 2024-11-16T06:25:58,826 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-16T06:25:58,826 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=85, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 430970423552b1d6601a0fc13165c26b}, {pid=86, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 790d0ea9a8687ec3cb897ef67a695db1}] 2024-11-16T06:25:58,827 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 430970423552b1d6601a0fc13165c26b 2024-11-16T06:25:58,827 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=86, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 790d0ea9a8687ec3cb897ef67a695db1 2024-11-16T06:25:58,909 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new 
MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-11-16T06:25:58,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-16T06:25:58,979 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=85 2024-11-16T06:25:58,979 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b. 2024-11-16T06:25:58,980 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegion(2902): Flushing 430970423552b1d6601a0fc13165c26b 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-16T06:25:58,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=86 2024-11-16T06:25:58,980 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1. 2024-11-16T06:25:58,981 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegion(2902): Flushing 790d0ea9a8687ec3cb897ef67a695db1 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-16T06:25:59,005 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411166910069de48e4b5bb03c885734c45dee_430970423552b1d6601a0fc13165c26b is 71, key is 00839d001a1316fc53d2877deb815aa8/cf:q/1731738358772/Put/seqid=0 2024-11-16T06:25:59,005 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241116f888c1c84ca34d3f96c21b5647d7c56f_790d0ea9a8687ec3cb897ef67a695db1 is 71, key is 104eb5a06741dae6222aae3fd99a9e12/cf:q/1731738358774/Put/seqid=0 2024-11-16T06:25:59,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742033_1209 (size=5172) 2024-11-16T06:25:59,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742033_1209 (size=5172) 2024-11-16T06:25:59,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742033_1209 (size=5172) 2024-11-16T06:25:59,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742034_1210 (size=8101) 2024-11-16T06:25:59,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742034_1210 (size=8101) 
2024-11-16T06:25:59,016 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:25:59,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742034_1210 (size=8101) 2024-11-16T06:25:59,016 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:25:59,021 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411166910069de48e4b5bb03c885734c45dee_430970423552b1d6601a0fc13165c26b to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202411166910069de48e4b5bb03c885734c45dee_430970423552b1d6601a0fc13165c26b 2024-11-16T06:25:59,021 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241116f888c1c84ca34d3f96c21b5647d7c56f_790d0ea9a8687ec3cb897ef67a695db1 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241116f888c1c84ca34d3f96c21b5647d7c56f_790d0ea9a8687ec3cb897ef67a695db1 2024-11-16T06:25:59,022 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/430970423552b1d6601a0fc13165c26b/.tmp/cf/7351751ab5c2464f89bc7ca360a3510b, store: [table=testtb-testConsecutiveExports family=cf region=430970423552b1d6601a0fc13165c26b] 2024-11-16T06:25:59,022 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/790d0ea9a8687ec3cb897ef67a695db1/.tmp/cf/782aa141b5ce48f599f6f09ca88f9d46, store: [table=testtb-testConsecutiveExports family=cf region=790d0ea9a8687ec3cb897ef67a695db1] 2024-11-16T06:25:59,023 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/430970423552b1d6601a0fc13165c26b/.tmp/cf/7351751ab5c2464f89bc7ca360a3510b is 206, key is 0eefe475bb4036d0ca2ce89fcdaef1baf/cf:q/1731738358772/Put/seqid=0 2024-11-16T06:25:59,023 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/790d0ea9a8687ec3cb897ef67a695db1/.tmp/cf/782aa141b5ce48f599f6f09ca88f9d46 is 206, key is 110d9d0662958f22f8cad453873070454/cf:q/1731738358774/Put/seqid=0 2024-11-16T06:25:59,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742036_1212 (size=14651) 2024-11-16T06:25:59,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742036_1212 (size=14651) 2024-11-16T06:25:59,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742035_1211 (size=6108) 2024-11-16T06:25:59,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742035_1211 (size=6108) 2024-11-16T06:25:59,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742035_1211 (size=6108) 2024-11-16T06:25:59,029 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/790d0ea9a8687ec3cb897ef67a695db1/.tmp/cf/782aa141b5ce48f599f6f09ca88f9d46 2024-11-16T06:25:59,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742036_1212 (size=14651) 2024-11-16T06:25:59,034 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/790d0ea9a8687ec3cb897ef67a695db1/.tmp/cf/782aa141b5ce48f599f6f09ca88f9d46 as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/790d0ea9a8687ec3cb897ef67a695db1/cf/782aa141b5ce48f599f6f09ca88f9d46 2024-11-16T06:25:59,039 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/790d0ea9a8687ec3cb897ef67a695db1/cf/782aa141b5ce48f599f6f09ca88f9d46, entries=46, sequenceid=6, filesize=14.3 K 2024-11-16T06:25:59,040 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 790d0ea9a8687ec3cb897ef67a695db1 in 58ms, sequenceid=6, compaction requested=false 2024-11-16T06:25:59,040 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegion(2603): Flush status journal for 790d0ea9a8687ec3cb897ef67a695db1: 2024-11-16T06:25:59,040 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1. for snaptb0-testConsecutiveExports completed. 2024-11-16T06:25:59,040 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-16T06:25:59,040 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:25:59,040 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/790d0ea9a8687ec3cb897ef67a695db1/cf/782aa141b5ce48f599f6f09ca88f9d46] hfiles 2024-11-16T06:25:59,040 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/790d0ea9a8687ec3cb897ef67a695db1/cf/782aa141b5ce48f599f6f09ca88f9d46 for snapshot=snaptb0-testConsecutiveExports 2024-11-16T06:25:59,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742037_1213 (size=107) 2024-11-16T06:25:59,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742037_1213 (size=107) 2024-11-16T06:25:59,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742037_1213 (size=107) 2024-11-16T06:25:59,046 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1. 
2024-11-16T06:25:59,046 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-16T06:25:59,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=86 2024-11-16T06:25:59,047 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 790d0ea9a8687ec3cb897ef67a695db1 2024-11-16T06:25:59,047 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=86, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 790d0ea9a8687ec3cb897ef67a695db1 2024-11-16T06:25:59,049 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=84, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 790d0ea9a8687ec3cb897ef67a695db1 in 222 msec 2024-11-16T06:25:59,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-16T06:25:59,429 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/430970423552b1d6601a0fc13165c26b/.tmp/cf/7351751ab5c2464f89bc7ca360a3510b 2024-11-16T06:25:59,436 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/430970423552b1d6601a0fc13165c26b/.tmp/cf/7351751ab5c2464f89bc7ca360a3510b as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/430970423552b1d6601a0fc13165c26b/cf/7351751ab5c2464f89bc7ca360a3510b 2024-11-16T06:25:59,443 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/430970423552b1d6601a0fc13165c26b/cf/7351751ab5c2464f89bc7ca360a3510b, entries=4, sequenceid=6, filesize=6.0 K 2024-11-16T06:25:59,445 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 430970423552b1d6601a0fc13165c26b in 465ms, sequenceid=6, compaction requested=false 2024-11-16T06:25:59,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-16T06:25:59,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegion(2603): Flush status journal for 430970423552b1d6601a0fc13165c26b: 2024-11-16T06:25:59,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b. 
for snaptb0-testConsecutiveExports completed. 2024-11-16T06:25:59,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-16T06:25:59,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:25:59,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/430970423552b1d6601a0fc13165c26b/cf/7351751ab5c2464f89bc7ca360a3510b] hfiles 2024-11-16T06:25:59,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/430970423552b1d6601a0fc13165c26b/cf/7351751ab5c2464f89bc7ca360a3510b for snapshot=snaptb0-testConsecutiveExports 2024-11-16T06:25:59,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742038_1214 (size=107) 2024-11-16T06:25:59,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742038_1214 (size=107) 2024-11-16T06:25:59,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742038_1214 (size=107) 2024-11-16T06:25:59,453 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b. 
2024-11-16T06:25:59,453 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=85 2024-11-16T06:25:59,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=85 2024-11-16T06:25:59,453 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 430970423552b1d6601a0fc13165c26b 2024-11-16T06:25:59,453 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 430970423552b1d6601a0fc13165c26b 2024-11-16T06:25:59,456 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=85, resume processing ppid=84 2024-11-16T06:25:59,456 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-16T06:25:59,456 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=84, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 430970423552b1d6601a0fc13165c26b in 628 msec 2024-11-16T06:25:59,457 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-16T06:25:59,458 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-16T06:25:59,458 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-16T06:25:59,458 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:25:59,459 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241116f888c1c84ca34d3f96c21b5647d7c56f_790d0ea9a8687ec3cb897ef67a695db1, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202411166910069de48e4b5bb03c885734c45dee_430970423552b1d6601a0fc13165c26b] hfiles 2024-11-16T06:25:59,459 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241116f888c1c84ca34d3f96c21b5647d7c56f_790d0ea9a8687ec3cb897ef67a695db1 2024-11-16T06:25:59,459 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202411166910069de48e4b5bb03c885734c45dee_430970423552b1d6601a0fc13165c26b 2024-11-16T06:25:59,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742039_1215 (size=291) 2024-11-16T06:25:59,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742039_1215 (size=291) 2024-11-16T06:25:59,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742039_1215 (size=291) 2024-11-16T06:25:59,467 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-16T06:25:59,467 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-11-16T06:25:59,468 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-16T06:25:59,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742040_1216 (size=951) 2024-11-16T06:25:59,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742040_1216 (size=951) 2024-11-16T06:25:59,479 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742040_1216 (size=951) 2024-11-16T06:25:59,482 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-16T06:25:59,489 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-16T06:25:59,490 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-16T06:25:59,491 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-16T06:25:59,492 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 84 2024-11-16T06:25:59,493 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 681 msec 2024-11-16T06:25:59,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-16T06:25:59,956 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-16T06:25:59,956 INFO [Time-limited test {}] snapshot.TestExportSnapshot(475): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738359956 2024-11-16T06:25:59,956 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738359956, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738359956, srcFsUri=hdfs://localhost:36767, srcDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:26:00,001 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:36767, inputRoot=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:26:00,001 
DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=org.apache.hadoop.fs.LocalFileSystem@30736072, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738359956, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738359956/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-16T06:26:00,004 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-16T06:26:00,011 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738359956/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-16T06:26:00,044 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:00,044 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:00,045 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:01,074 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop-5708217017713520611.jar 2024-11-16T06:26:01,074 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:01,075 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:01,161 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop-6042998743134105557.jar 2024-11-16T06:26:01,161 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:01,162 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:01,162 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:01,163 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:01,163 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:01,164 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:01,164 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-16T06:26:01,164 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-16T06:26:01,165 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-16T06:26:01,165 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-16T06:26:01,166 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-16T06:26:01,166 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-16T06:26:01,167 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-16T06:26:01,167 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-16T06:26:01,167 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-16T06:26:01,168 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-16T06:26:01,168 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-16T06:26:01,169 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:26:01,169 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:26:01,170 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:26:01,171 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:26:01,171 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:26:01,172 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:26:01,172 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:26:01,250 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742041_1217 (size=131440) 2024-11-16T06:26:01,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742041_1217 (size=131440) 2024-11-16T06:26:01,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742041_1217 (size=131440) 2024-11-16T06:26:01,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742042_1218 (size=4188619) 2024-11-16T06:26:01,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742042_1218 (size=4188619) 2024-11-16T06:26:01,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742042_1218 (size=4188619) 2024-11-16T06:26:01,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742043_1219 (size=440656) 2024-11-16T06:26:01,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742043_1219 (size=440656) 2024-11-16T06:26:01,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742043_1219 (size=440656) 2024-11-16T06:26:01,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742044_1220 (size=1323991) 2024-11-16T06:26:01,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742044_1220 (size=1323991) 2024-11-16T06:26:01,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742044_1220 (size=1323991) 2024-11-16T06:26:01,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742045_1221 (size=903732) 2024-11-16T06:26:01,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742045_1221 (size=903732) 2024-11-16T06:26:01,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742045_1221 (size=903732) 2024-11-16T06:26:01,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742046_1222 (size=8360083) 2024-11-16T06:26:01,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742046_1222 (size=8360083) 2024-11-16T06:26:01,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742046_1222 (size=8360083) 2024-11-16T06:26:01,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742047_1223 (size=1877034) 2024-11-16T06:26:01,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742047_1223 (size=1877034) 2024-11-16T06:26:01,422 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742047_1223 (size=1877034) 2024-11-16T06:26:01,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742048_1224 (size=77835) 2024-11-16T06:26:01,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742048_1224 (size=77835) 2024-11-16T06:26:01,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742048_1224 (size=77835) 2024-11-16T06:26:01,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742049_1225 (size=30949) 2024-11-16T06:26:01,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742049_1225 (size=30949) 2024-11-16T06:26:01,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742049_1225 (size=30949) 2024-11-16T06:26:01,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742050_1226 (size=1597327) 2024-11-16T06:26:01,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742050_1226 (size=1597327) 2024-11-16T06:26:01,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742050_1226 (size=1597327) 2024-11-16T06:26:01,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742051_1227 (size=6424738) 2024-11-16T06:26:01,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742051_1227 (size=6424738) 2024-11-16T06:26:01,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742051_1227 (size=6424738) 2024-11-16T06:26:01,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742052_1228 (size=4695811) 2024-11-16T06:26:01,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742052_1228 (size=4695811) 2024-11-16T06:26:01,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742052_1228 (size=4695811) 2024-11-16T06:26:01,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742053_1229 (size=232957) 2024-11-16T06:26:01,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742053_1229 (size=232957) 2024-11-16T06:26:01,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742053_1229 (size=232957) 2024-11-16T06:26:01,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742054_1230 (size=127628) 2024-11-16T06:26:01,995 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742054_1230 (size=127628) 2024-11-16T06:26:01,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742054_1230 (size=127628) 2024-11-16T06:26:02,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742055_1231 (size=20406) 2024-11-16T06:26:02,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742055_1231 (size=20406) 2024-11-16T06:26:02,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742055_1231 (size=20406) 2024-11-16T06:26:02,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742056_1232 (size=5175431) 2024-11-16T06:26:02,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742056_1232 (size=5175431) 2024-11-16T06:26:02,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742056_1232 (size=5175431) 2024-11-16T06:26:02,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742057_1233 (size=217634) 2024-11-16T06:26:02,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742057_1233 (size=217634) 2024-11-16T06:26:02,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742057_1233 (size=217634) 2024-11-16T06:26:02,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742058_1234 (size=1832290) 2024-11-16T06:26:02,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742058_1234 (size=1832290) 2024-11-16T06:26:02,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742058_1234 (size=1832290) 2024-11-16T06:26:02,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742059_1235 (size=322274) 2024-11-16T06:26:02,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742059_1235 (size=322274) 2024-11-16T06:26:02,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742059_1235 (size=322274) 2024-11-16T06:26:02,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742060_1236 (size=503880) 2024-11-16T06:26:02,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742060_1236 (size=503880) 2024-11-16T06:26:02,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742060_1236 (size=503880) 
2024-11-16T06:26:02,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742061_1237 (size=29229) 2024-11-16T06:26:02,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742061_1237 (size=29229) 2024-11-16T06:26:02,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742061_1237 (size=29229) 2024-11-16T06:26:02,183 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0003_000001 (auth:SIMPLE) from 127.0.0.1:58740 2024-11-16T06:26:02,204 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_1/usercache/jenkins/appcache/application_1731738285782_0003/container_1731738285782_0003_01_000001/launch_container.sh] 2024-11-16T06:26:02,204 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_1/usercache/jenkins/appcache/application_1731738285782_0003/container_1731738285782_0003_01_000001/container_tokens] 2024-11-16T06:26:02,204 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_1/usercache/jenkins/appcache/application_1731738285782_0003/container_1731738285782_0003_01_000001/sysfs] 2024-11-16T06:26:02,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742062_1238 (size=24096) 2024-11-16T06:26:02,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742062_1238 (size=24096) 2024-11-16T06:26:02,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742062_1238 (size=24096) 2024-11-16T06:26:02,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742063_1239 (size=111872) 2024-11-16T06:26:02,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742063_1239 (size=111872) 2024-11-16T06:26:02,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742063_1239 (size=111872) 2024-11-16T06:26:02,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742064_1240 (size=45609) 2024-11-16T06:26:02,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742064_1240 (size=45609) 2024-11-16T06:26:02,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to 
blk_1073742064_1240 (size=45609) 2024-11-16T06:26:02,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742065_1241 (size=136454) 2024-11-16T06:26:02,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742065_1241 (size=136454) 2024-11-16T06:26:02,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742065_1241 (size=136454) 2024-11-16T06:26:02,267 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-16T06:26:02,269 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-11-16T06:26:02,271 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=33.2 K 2024-11-16T06:26:02,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742066_1242 (size=714) 2024-11-16T06:26:02,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742066_1242 (size=714) 2024-11-16T06:26:02,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742066_1242 (size=714) 2024-11-16T06:26:02,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742067_1243 (size=15) 2024-11-16T06:26:02,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742067_1243 (size=15) 2024-11-16T06:26:02,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742067_1243 (size=15) 2024-11-16T06:26:02,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742068_1244 (size=303773) 2024-11-16T06:26:02,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742068_1244 (size=303773) 2024-11-16T06:26:02,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742068_1244 (size=303773) 2024-11-16T06:26:02,339 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-16T06:26:02,339 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-16T06:26:02,679 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0004_000001 (auth:SIMPLE) from 127.0.0.1:47666 2024-11-16T06:26:02,925 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-16T06:26:06,745 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T06:26:08,599 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0004_000001 (auth:SIMPLE) from 127.0.0.1:51708 2024-11-16T06:26:08,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742069_1245 (size=349423) 2024-11-16T06:26:08,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742069_1245 (size=349423) 2024-11-16T06:26:08,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742069_1245 (size=349423) 2024-11-16T06:26:10,878 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0004_000001 (auth:SIMPLE) from 127.0.0.1:39996 2024-11-16T06:26:13,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742070_1246 (size=17447) 2024-11-16T06:26:13,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742070_1246 (size=17447) 2024-11-16T06:26:13,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742070_1246 (size=17447) 2024-11-16T06:26:13,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742071_1247 (size=462) 2024-11-16T06:26:13,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742071_1247 (size=462) 2024-11-16T06:26:13,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742071_1247 (size=462) 2024-11-16T06:26:13,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742072_1248 (size=17447) 2024-11-16T06:26:13,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742072_1248 (size=17447) 2024-11-16T06:26:13,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742072_1248 (size=17447) 2024-11-16T06:26:13,866 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_1/usercache/jenkins/appcache/application_1731738285782_0004/container_1731738285782_0004_01_000002/launch_container.sh] 2024-11-16T06:26:13,866 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_1/usercache/jenkins/appcache/application_1731738285782_0004/container_1731738285782_0004_01_000002/container_tokens] 2024-11-16T06:26:13,867 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_1/usercache/jenkins/appcache/application_1731738285782_0004/container_1731738285782_0004_01_000002/sysfs] 2024-11-16T06:26:13,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742073_1249 (size=349423) 2024-11-16T06:26:13,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742073_1249 (size=349423) 2024-11-16T06:26:13,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742073_1249 (size=349423) 2024-11-16T06:26:13,898 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0004_000001 (auth:SIMPLE) from 127.0.0.1:40000 2024-11-16T06:26:15,495 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-16T06:26:15,495 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
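The entries above show ExportSnapshot finalizing the copy job and then re-verifying the exported snapshot's expiration status and integrity before the test inspects both filesystems. For reference, a minimal Java sketch of how such an export to a local filesystem target is typically driven through the ExportSnapshot tool; the flag names follow the HBase reference guide, and the snapshot name, destination URI, and mapper count below are illustrative assumptions rather than the test's exact argument list:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Run the ExportSnapshot tool: copy the named snapshot from the source
        // cluster root (taken from conf) to a local filesystem destination.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testConsecutiveExports",  // snapshot to export
            "-copy-to", "file:///tmp/local-export",         // illustrative target URI
            "-mappers", "1"                                 // illustrative parallelism
        });
        System.exit(rc);
      }
    }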
2024-11-16T06:26:15,498 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testConsecutiveExports
2024-11-16T06:26:15,498 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot
2024-11-16T06:26:15,498 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state
2024-11-16T06:26:15,498 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2111949037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testConsecutiveExports
2024-11-16T06:26:15,499 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo
2024-11-16T06:26:15,499 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest
2024-11-16T06:26:15,499 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in org.apache.hadoop.fs.LocalFileSystem@30736072 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738359956/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738359956/.hbase-snapshot/snaptb0-testConsecutiveExports
2024-11-16T06:26:15,499 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738359956/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo
2024-11-16T06:26:15,499 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738359956/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest
2024-11-16T06:26:15,500 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738359956, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738359956, srcFsUri=hdfs://localhost:36767, srcDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7
2024-11-16T06:26:15,530 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:36767, inputRoot=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7
2024-11-16T06:26:15,530 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=org.apache.hadoop.fs.LocalFileSystem@30736072,
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738359956, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738359956/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-16T06:26:15,532 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-16T06:26:15,537 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738359956/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-16T06:26:15,547 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:15,548 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:15,548 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:16,450 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop-2264101232069128899.jar 2024-11-16T06:26:16,450 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:16,450 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:16,509 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop-7424818564079737125.jar 2024-11-16T06:26:16,509 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:16,509 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:16,510 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:16,510 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:16,510 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:16,510 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:16,510 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-16T06:26:16,510 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-16T06:26:16,511 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-16T06:26:16,511 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-16T06:26:16,511 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-16T06:26:16,511 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-16T06:26:16,511 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-16T06:26:16,511 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-16T06:26:16,512 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-16T06:26:16,512 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-16T06:26:16,512 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-16T06:26:16,512 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:26:16,512 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:26:16,512 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:26:16,513 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:26:16,513 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:26:16,513 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:26:16,513 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:26:16,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742074_1250 (size=131440) 2024-11-16T06:26:16,560 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742074_1250 (size=131440) 2024-11-16T06:26:16,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742074_1250 (size=131440) 2024-11-16T06:26:16,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742075_1251 (size=4188619) 2024-11-16T06:26:16,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742075_1251 (size=4188619) 2024-11-16T06:26:16,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742075_1251 (size=4188619) 2024-11-16T06:26:16,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742076_1252 (size=440656) 2024-11-16T06:26:16,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742076_1252 (size=440656) 2024-11-16T06:26:16,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742076_1252 (size=440656) 2024-11-16T06:26:16,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742077_1253 (size=1323991) 2024-11-16T06:26:16,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742077_1253 (size=1323991) 2024-11-16T06:26:16,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742077_1253 (size=1323991) 2024-11-16T06:26:16,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742078_1254 (size=903732) 2024-11-16T06:26:16,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742078_1254 (size=903732) 2024-11-16T06:26:16,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742078_1254 (size=903732) 2024-11-16T06:26:16,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742079_1255 (size=8360083) 2024-11-16T06:26:16,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742079_1255 (size=8360083) 2024-11-16T06:26:16,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742079_1255 (size=8360083) 2024-11-16T06:26:16,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742080_1256 (size=1877034) 2024-11-16T06:26:16,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742080_1256 (size=1877034) 2024-11-16T06:26:16,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742080_1256 (size=1877034) 2024-11-16T06:26:16,645 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742081_1257 (size=77835) 2024-11-16T06:26:16,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742081_1257 (size=77835) 2024-11-16T06:26:16,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742081_1257 (size=77835) 2024-11-16T06:26:16,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742082_1258 (size=30949) 2024-11-16T06:26:16,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742082_1258 (size=30949) 2024-11-16T06:26:16,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742082_1258 (size=30949) 2024-11-16T06:26:16,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742083_1259 (size=1597327) 2024-11-16T06:26:16,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742083_1259 (size=1597327) 2024-11-16T06:26:16,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742083_1259 (size=1597327) 2024-11-16T06:26:16,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742084_1260 (size=6424738) 2024-11-16T06:26:16,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742084_1260 (size=6424738) 2024-11-16T06:26:16,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742084_1260 (size=6424738) 2024-11-16T06:26:16,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742085_1261 (size=4695811) 2024-11-16T06:26:16,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742085_1261 (size=4695811) 2024-11-16T06:26:16,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742085_1261 (size=4695811) 2024-11-16T06:26:16,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742086_1262 (size=232957) 2024-11-16T06:26:16,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742086_1262 (size=232957) 2024-11-16T06:26:16,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742086_1262 (size=232957) 2024-11-16T06:26:16,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742087_1263 (size=127628) 2024-11-16T06:26:16,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742087_1263 (size=127628) 2024-11-16T06:26:16,725 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742087_1263 (size=127628) 2024-11-16T06:26:16,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742088_1264 (size=20406) 2024-11-16T06:26:16,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742088_1264 (size=20406) 2024-11-16T06:26:16,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742088_1264 (size=20406) 2024-11-16T06:26:16,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742089_1265 (size=5175431) 2024-11-16T06:26:16,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742089_1265 (size=5175431) 2024-11-16T06:26:16,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742089_1265 (size=5175431) 2024-11-16T06:26:16,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742090_1266 (size=217634) 2024-11-16T06:26:16,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742090_1266 (size=217634) 2024-11-16T06:26:16,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742090_1266 (size=217634) 2024-11-16T06:26:16,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742091_1267 (size=1832290) 2024-11-16T06:26:16,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742091_1267 (size=1832290) 2024-11-16T06:26:16,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742091_1267 (size=1832290) 2024-11-16T06:26:16,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742092_1268 (size=322274) 2024-11-16T06:26:16,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742092_1268 (size=322274) 2024-11-16T06:26:16,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742092_1268 (size=322274) 2024-11-16T06:26:16,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742093_1269 (size=503880) 2024-11-16T06:26:16,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742093_1269 (size=503880) 2024-11-16T06:26:16,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742093_1269 (size=503880) 2024-11-16T06:26:16,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742094_1270 (size=29229) 
2024-11-16T06:26:16,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742094_1270 (size=29229) 2024-11-16T06:26:16,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742094_1270 (size=29229) 2024-11-16T06:26:16,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742095_1271 (size=24096) 2024-11-16T06:26:16,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742095_1271 (size=24096) 2024-11-16T06:26:16,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742095_1271 (size=24096) 2024-11-16T06:26:16,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742096_1272 (size=111872) 2024-11-16T06:26:16,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742096_1272 (size=111872) 2024-11-16T06:26:16,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742096_1272 (size=111872) 2024-11-16T06:26:16,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742097_1273 (size=45609) 2024-11-16T06:26:16,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742097_1273 (size=45609) 2024-11-16T06:26:16,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742097_1273 (size=45609) 2024-11-16T06:26:16,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742098_1274 (size=136454) 2024-11-16T06:26:16,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742098_1274 (size=136454) 2024-11-16T06:26:16,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742098_1274 (size=136454) 2024-11-16T06:26:16,835 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
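The JobResourceUploader warning just above ("No job jar file set. User classes may not be found.") appears because no job jar was set for the submitted copy job; the long run of TableMapReduceUtil(972) entries is the framework resolving and shipping each HBase dependency jar individually instead. In application code the equivalent setup is usually a couple of calls on the Job; a minimal sketch, with the class and job names being illustrative assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class JobJarSetupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "snapshot-export-sketch");
        // Point the job at a jar so user classes are found on the cluster,
        // which is what the Job#setJar(String) hint in the warning refers to.
        job.setJarByClass(JobJarSetupSketch.class);
        // Add HBase (and transitive) dependency jars to the job's classpath,
        // the same per-jar resolution logged by TableMapReduceUtil above.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }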
2024-11-16T06:26:16,837 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-11-16T06:26:16,839 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=33.2 K 2024-11-16T06:26:16,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742099_1275 (size=714) 2024-11-16T06:26:16,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742099_1275 (size=714) 2024-11-16T06:26:16,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742099_1275 (size=714) 2024-11-16T06:26:16,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742100_1276 (size=15) 2024-11-16T06:26:16,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742100_1276 (size=15) 2024-11-16T06:26:16,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742100_1276 (size=15) 2024-11-16T06:26:16,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742101_1277 (size=303773) 2024-11-16T06:26:16,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742101_1277 (size=303773) 2024-11-16T06:26:16,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742101_1277 (size=303773) 2024-11-16T06:26:19,989 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-16T06:26:19,989 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-16T06:26:19,999 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0004_000001 (auth:SIMPLE) from 127.0.0.1:48222 2024-11-16T06:26:20,011 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_0/usercache/jenkins/appcache/application_1731738285782_0004/container_1731738285782_0004_01_000001/launch_container.sh] 2024-11-16T06:26:20,011 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_0/usercache/jenkins/appcache/application_1731738285782_0004/container_1731738285782_0004_01_000001/container_tokens] 2024-11-16T06:26:20,011 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_0/usercache/jenkins/appcache/application_1731738285782_0004/container_1731738285782_0004_01_000001/sysfs] 2024-11-16T06:26:20,175 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0005_000001 (auth:SIMPLE) from 127.0.0.1:46154 2024-11-16T06:26:25,888 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0005_000001 (auth:SIMPLE) from 127.0.0.1:51714 2024-11-16T06:26:26,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742102_1278 (size=349423) 2024-11-16T06:26:26,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742102_1278 (size=349423) 2024-11-16T06:26:26,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742102_1278 (size=349423) 2024-11-16T06:26:28,212 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0005_000001 (auth:SIMPLE) from 127.0.0.1:48230 2024-11-16T06:26:31,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742103_1279 (size=16912) 2024-11-16T06:26:31,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742103_1279 (size=16912) 2024-11-16T06:26:31,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742103_1279 (size=16912) 2024-11-16T06:26:31,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742104_1280 (size=462) 2024-11-16T06:26:31,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742104_1280 (size=462) 2024-11-16T06:26:31,247 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742104_1280 (size=462) 2024-11-16T06:26:31,288 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_3/usercache/jenkins/appcache/application_1731738285782_0005/container_1731738285782_0005_01_000002/launch_container.sh] 2024-11-16T06:26:31,288 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_3/usercache/jenkins/appcache/application_1731738285782_0005/container_1731738285782_0005_01_000002/container_tokens] 2024-11-16T06:26:31,288 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_3/usercache/jenkins/appcache/application_1731738285782_0005/container_1731738285782_0005_01_000002/sysfs] 2024-11-16T06:26:31,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742105_1281 (size=16912) 2024-11-16T06:26:31,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742105_1281 (size=16912) 2024-11-16T06:26:31,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742105_1281 (size=16912) 2024-11-16T06:26:31,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742106_1282 (size=349423) 2024-11-16T06:26:31,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742106_1282 (size=349423) 2024-11-16T06:26:31,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742106_1282 (size=349423) 2024-11-16T06:26:31,350 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0005_000001 (auth:SIMPLE) from 127.0.0.1:60480 2024-11-16T06:26:33,201 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-16T06:26:33,201 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
2024-11-16T06:26:33,203 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testConsecutiveExports 2024-11-16T06:26:33,204 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-16T06:26:33,204 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-16T06:26:33,204 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2111949037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-16T06:26:33,205 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-16T06:26:33,205 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-16T06:26:33,205 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in org.apache.hadoop.fs.LocalFileSystem@30736072 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738359956/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738359956/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-16T06:26:33,205 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738359956/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-16T06:26:33,205 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738359956/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-16T06:26:33,219 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-11-16T06:26:33,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=87, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 2024-11-16T06:26:33,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=87 2024-11-16T06:26:33,222 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738393222"}]},"ts":"1731738393222"} 2024-11-16T06:26:33,224 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-11-16T06:26:33,224 INFO [PEWorker-2 {}] 
procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-11-16T06:26:33,225 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-11-16T06:26:33,227 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=430970423552b1d6601a0fc13165c26b, UNASSIGN}, {pid=90, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=790d0ea9a8687ec3cb897ef67a695db1, UNASSIGN}] 2024-11-16T06:26:33,228 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=90, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=790d0ea9a8687ec3cb897ef67a695db1, UNASSIGN 2024-11-16T06:26:33,228 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=430970423552b1d6601a0fc13165c26b, UNASSIGN 2024-11-16T06:26:33,228 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=89 updating hbase:meta row=430970423552b1d6601a0fc13165c26b, regionState=CLOSING, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:26:33,228 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=90 updating hbase:meta row=790d0ea9a8687ec3cb897ef67a695db1, regionState=CLOSING, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:26:33,231 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=90, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=790d0ea9a8687ec3cb897ef67a695db1, UNASSIGN because future has completed 2024-11-16T06:26:33,231 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-16T06:26:33,231 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE, hasLock=false; CloseRegionProcedure 790d0ea9a8687ec3cb897ef67a695db1, server=3456ee6a3164,37533,1731738278939}] 2024-11-16T06:26:33,231 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=430970423552b1d6601a0fc13165c26b, UNASSIGN because future has completed 2024-11-16T06:26:33,232 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-16T06:26:33,232 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=92, ppid=89, state=RUNNABLE, hasLock=false; CloseRegionProcedure 430970423552b1d6601a0fc13165c26b, server=3456ee6a3164,35939,1731738278639}] 2024-11-16T06:26:33,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking 
to see if procedure is done pid=87 2024-11-16T06:26:33,384 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(122): Close 430970423552b1d6601a0fc13165c26b 2024-11-16T06:26:33,384 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] handler.UnassignRegionHandler(122): Close 790d0ea9a8687ec3cb897ef67a695db1 2024-11-16T06:26:33,385 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-16T06:26:33,385 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-16T06:26:33,385 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1722): Closing 790d0ea9a8687ec3cb897ef67a695db1, disabling compactions & flushes 2024-11-16T06:26:33,385 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1722): Closing 430970423552b1d6601a0fc13165c26b, disabling compactions & flushes 2024-11-16T06:26:33,385 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1. 2024-11-16T06:26:33,385 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b. 2024-11-16T06:26:33,385 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1. 2024-11-16T06:26:33,385 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b. 2024-11-16T06:26:33,385 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1. after waiting 0 ms 2024-11-16T06:26:33,385 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b. after waiting 0 ms 2024-11-16T06:26:33,385 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1. 2024-11-16T06:26:33,385 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b. 
2024-11-16T06:26:33,394 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/790d0ea9a8687ec3cb897ef67a695db1/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T06:26:33,394 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/430970423552b1d6601a0fc13165c26b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T06:26:33,395 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:26:33,395 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:26:33,395 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b. 2024-11-16T06:26:33,395 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1. 2024-11-16T06:26:33,395 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1676): Region close journal for 430970423552b1d6601a0fc13165c26b: Waiting for close lock at 1731738393385Running coprocessor pre-close hooks at 1731738393385Disabling compacts and flushes for region at 1731738393385Disabling writes for close at 1731738393385Writing region close event to WAL at 1731738393387 (+2 ms)Running coprocessor post-close hooks at 1731738393395 (+8 ms)Closed at 1731738393395 2024-11-16T06:26:33,395 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1676): Region close journal for 790d0ea9a8687ec3cb897ef67a695db1: Waiting for close lock at 1731738393385Running coprocessor pre-close hooks at 1731738393385Disabling compacts and flushes for region at 1731738393385Disabling writes for close at 1731738393385Writing region close event to WAL at 1731738393387 (+2 ms)Running coprocessor post-close hooks at 1731738393395 (+8 ms)Closed at 1731738393395 2024-11-16T06:26:33,397 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] handler.UnassignRegionHandler(157): Closed 790d0ea9a8687ec3cb897ef67a695db1 2024-11-16T06:26:33,398 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=90 updating hbase:meta row=790d0ea9a8687ec3cb897ef67a695db1, regionState=CLOSED 2024-11-16T06:26:33,398 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(157): Closed 430970423552b1d6601a0fc13165c26b 2024-11-16T06:26:33,399 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=89 updating hbase:meta row=430970423552b1d6601a0fc13165c26b, regionState=CLOSED 2024-11-16T06:26:33,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake 
up procedure pid=91, ppid=90, state=RUNNABLE, hasLock=false; CloseRegionProcedure 790d0ea9a8687ec3cb897ef67a695db1, server=3456ee6a3164,37533,1731738278939 because future has completed 2024-11-16T06:26:33,401 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=92, ppid=89, state=RUNNABLE, hasLock=false; CloseRegionProcedure 430970423552b1d6601a0fc13165c26b, server=3456ee6a3164,35939,1731738278639 because future has completed 2024-11-16T06:26:33,403 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=91, resume processing ppid=90 2024-11-16T06:26:33,403 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=90, state=SUCCESS, hasLock=false; CloseRegionProcedure 790d0ea9a8687ec3cb897ef67a695db1, server=3456ee6a3164,37533,1731738278939 in 170 msec 2024-11-16T06:26:33,404 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=92, resume processing ppid=89 2024-11-16T06:26:33,404 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=89, state=SUCCESS, hasLock=false; CloseRegionProcedure 430970423552b1d6601a0fc13165c26b, server=3456ee6a3164,35939,1731738278639 in 170 msec 2024-11-16T06:26:33,405 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, ppid=88, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=790d0ea9a8687ec3cb897ef67a695db1, UNASSIGN in 177 msec 2024-11-16T06:26:33,406 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=89, resume processing ppid=88 2024-11-16T06:26:33,406 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, ppid=88, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=430970423552b1d6601a0fc13165c26b, UNASSIGN in 178 msec 2024-11-16T06:26:33,409 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=88, resume processing ppid=87 2024-11-16T06:26:33,409 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, ppid=87, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 182 msec 2024-11-16T06:26:33,410 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738393410"}]},"ts":"1731738393410"} 2024-11-16T06:26:33,412 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-11-16T06:26:33,412 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-11-16T06:26:33,415 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 194 msec 2024-11-16T06:26:33,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=87 2024-11-16T06:26:33,535 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-16T06:26:33,535 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete 
testtb-testConsecutiveExports 2024-11-16T06:26:33,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-16T06:26:33,537 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-16T06:26:33,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-11-16T06:26:33,538 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=93, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-16T06:26:33,541 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-11-16T06:26:33,543 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/790d0ea9a8687ec3cb897ef67a695db1 2024-11-16T06:26:33,543 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/430970423552b1d6601a0fc13165c26b 2024-11-16T06:26:33,546 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/430970423552b1d6601a0fc13165c26b/cf, FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/430970423552b1d6601a0fc13165c26b/recovered.edits] 2024-11-16T06:26:33,547 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/790d0ea9a8687ec3cb897ef67a695db1/cf, FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/790d0ea9a8687ec3cb897ef67a695db1/recovered.edits] 2024-11-16T06:26:33,552 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/790d0ea9a8687ec3cb897ef67a695db1/cf/782aa141b5ce48f599f6f09ca88f9d46 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testConsecutiveExports/790d0ea9a8687ec3cb897ef67a695db1/cf/782aa141b5ce48f599f6f09ca88f9d46 2024-11-16T06:26:33,552 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/430970423552b1d6601a0fc13165c26b/cf/7351751ab5c2464f89bc7ca360a3510b to 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testConsecutiveExports/430970423552b1d6601a0fc13165c26b/cf/7351751ab5c2464f89bc7ca360a3510b 2024-11-16T06:26:33,754 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/790d0ea9a8687ec3cb897ef67a695db1/recovered.edits/9.seqid to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testConsecutiveExports/790d0ea9a8687ec3cb897ef67a695db1/recovered.edits/9.seqid 2024-11-16T06:26:33,754 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/430970423552b1d6601a0fc13165c26b/recovered.edits/9.seqid to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testConsecutiveExports/430970423552b1d6601a0fc13165c26b/recovered.edits/9.seqid 2024-11-16T06:26:33,755 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/790d0ea9a8687ec3cb897ef67a695db1 2024-11-16T06:26:33,755 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testConsecutiveExports/430970423552b1d6601a0fc13165c26b 2024-11-16T06:26:33,755 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-11-16T06:26:33,755 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-11-16T06:26:33,756 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf] 2024-11-16T06:26:33,759 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241116f888c1c84ca34d3f96c21b5647d7c56f_790d0ea9a8687ec3cb897ef67a695db1 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241116f888c1c84ca34d3f96c21b5647d7c56f_790d0ea9a8687ec3cb897ef67a695db1 2024-11-16T06:26:33,760 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202411166910069de48e4b5bb03c885734c45dee_430970423552b1d6601a0fc13165c26b to 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202411166910069de48e4b5bb03c885734c45dee_430970423552b1d6601a0fc13165c26b 2024-11-16T06:26:33,761 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-11-16T06:26:33,866 INFO [AsyncFSWAL-0-hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/MasterData-prefix:3456ee6a3164,39381,1731738277818 {}] wal.AbstractFSWAL(1368): Slow sync cost: 103 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40907,DS-3defb6be-028a-4c36-bf76-0bb3d6ff9764,DISK], DatanodeInfoWithStorage[127.0.0.1:36185,DS-f00f7888-c79b-4a2a-b682-4301f96bc86d,DISK], DatanodeInfoWithStorage[127.0.0.1:33197,DS-1ebc5369-9ac9-4d7a-92d4-567a577c8101,DISK]] 2024-11-16T06:26:33,866 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=93, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-16T06:26:33,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-16T06:26:33,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-16T06:26:33,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-16T06:26:33,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-16T06:26:33,874 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-11-16T06:26:33,874 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-16T06:26:33,874 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-16T06:26:33,874 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-16T06:26:33,875 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-16T06:26:33,878 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testConsecutiveExports' descriptor. 
2024-11-16T06:26:33,880 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=93, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-16T06:26:33,880 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testConsecutiveExports' from region states. 2024-11-16T06:26:33,880 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731738393880"}]},"ts":"9223372036854775807"} 2024-11-16T06:26:33,880 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731738393880"}]},"ts":"9223372036854775807"} 2024-11-16T06:26:33,882 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-16T06:26:33,882 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 430970423552b1d6601a0fc13165c26b, NAME => 'testtb-testConsecutiveExports,,1731738357757.430970423552b1d6601a0fc13165c26b.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 790d0ea9a8687ec3cb897ef67a695db1, NAME => 'testtb-testConsecutiveExports,1,1731738357757.790d0ea9a8687ec3cb897ef67a695db1.', STARTKEY => '1', ENDKEY => ''}] 2024-11-16T06:26:33,882 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testConsecutiveExports' as deleted. 2024-11-16T06:26:33,882 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731738393882"}]},"ts":"9223372036854775807"} 2024-11-16T06:26:33,884 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-11-16T06:26:33,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-16T06:26:33,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-16T06:26:33,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:33,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:33,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-16T06:26:33,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, 
quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-16T06:26:33,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:33,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:33,885 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=93, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-16T06:26:33,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=93 2024-11-16T06:26:33,886 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 350 msec 2024-11-16T06:26:33,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=93 2024-11-16T06:26:33,997 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-11-16T06:26:33,997 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-16T06:26:34,009 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-11-16T06:26:34,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-11-16T06:26:34,016 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-11-16T06:26:34,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-11-16T06:26:34,043 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=795 (was 786) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2111949037_22 at /127.0.0.1:59852 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-4024 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) 
java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2111949037_22 at /127.0.0.1:57380 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2111949037_22 at 
/127.0.0.1:40618 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 122049) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (692179358) connection to localhost/127.0.0.1:38559 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (692179358) connection to localhost/127.0.0.1:39073 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38559 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1417973662_1 at /127.0.0.1:57362 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1417973662_1 at /127.0.0.1:40608 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=815 (was 806) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=660 (was 699), ProcessCount=19 (was 19), AvailableMemoryMB=453 (was 623) 2024-11-16T06:26:34,043 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=795 is superior to 500 2024-11-16T06:26:34,061 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=795, OpenFileDescriptor=815, MaxFileDescriptor=1048576, SystemLoadAverage=660, ProcessCount=19, AvailableMemoryMB=453 2024-11-16T06:26:34,062 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=795 is superior to 500 2024-11-16T06:26:34,063 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T06:26:34,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:34,065 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T06:26:34,065 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 94 2024-11-16T06:26:34,066 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T06:26:34,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-16T06:26:34,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742107_1283 (size=458) 2024-11-16T06:26:34,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742107_1283 (size=458) 2024-11-16T06:26:34,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742107_1283 (size=458) 2024-11-16T06:26:34,074 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4f48e9ca11073ed9d5885225fa800cb0, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:26:34,074 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 2bf927a7e914904d7d25f15851efde52, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:26:34,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742109_1285 (size=83) 2024-11-16T06:26:34,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742109_1285 (size=83) 2024-11-16T06:26:34,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742109_1285 (size=83) 2024-11-16T06:26:34,085 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:26:34,085 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing 2bf927a7e914904d7d25f15851efde52, disabling compactions & flushes 2024-11-16T06:26:34,085 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52. 2024-11-16T06:26:34,085 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52. 2024-11-16T06:26:34,085 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52. 
after waiting 0 ms 2024-11-16T06:26:34,085 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52. 2024-11-16T06:26:34,085 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52. 2024-11-16T06:26:34,085 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 2bf927a7e914904d7d25f15851efde52: Waiting for close lock at 1731738394085Disabling compacts and flushes for region at 1731738394085Disabling writes for close at 1731738394085Writing region close event to WAL at 1731738394085Closed at 1731738394085 2024-11-16T06:26:34,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742108_1284 (size=83) 2024-11-16T06:26:34,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742108_1284 (size=83) 2024-11-16T06:26:34,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742108_1284 (size=83) 2024-11-16T06:26:34,087 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:26:34,087 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing 4f48e9ca11073ed9d5885225fa800cb0, disabling compactions & flushes 2024-11-16T06:26:34,087 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0. 2024-11-16T06:26:34,087 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0. 2024-11-16T06:26:34,087 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0. after waiting 0 ms 2024-11-16T06:26:34,087 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0. 2024-11-16T06:26:34,087 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0. 
2024-11-16T06:26:34,087 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4f48e9ca11073ed9d5885225fa800cb0: Waiting for close lock at 1731738394087Disabling compacts and flushes for region at 1731738394087Disabling writes for close at 1731738394087Writing region close event to WAL at 1731738394087Closed at 1731738394087 2024-11-16T06:26:34,088 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T06:26:34,088 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1731738394088"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731738394088"}]},"ts":"1731738394088"} 2024-11-16T06:26:34,088 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1731738394088"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731738394088"}]},"ts":"1731738394088"} 2024-11-16T06:26:34,090 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-16T06:26:34,091 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T06:26:34,091 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738394091"}]},"ts":"1731738394091"} 2024-11-16T06:26:34,093 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-11-16T06:26:34,093 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {3456ee6a3164=0} racks are {/default-rack=0} 2024-11-16T06:26:34,094 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-16T06:26:34,094 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-16T06:26:34,094 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-16T06:26:34,094 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-16T06:26:34,094 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-16T06:26:34,094 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-16T06:26:34,094 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-16T06:26:34,094 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-16T06:26:34,094 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-16T06:26:34,094 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-16T06:26:34,095 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=4f48e9ca11073ed9d5885225fa800cb0, ASSIGN}, {pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=2bf927a7e914904d7d25f15851efde52, ASSIGN}] 2024-11-16T06:26:34,096 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=2bf927a7e914904d7d25f15851efde52, ASSIGN 2024-11-16T06:26:34,096 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=4f48e9ca11073ed9d5885225fa800cb0, ASSIGN 2024-11-16T06:26:34,096 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=2bf927a7e914904d7d25f15851efde52, ASSIGN; state=OFFLINE, location=3456ee6a3164,40155,1731738278871; forceNewPlan=false, retain=false 2024-11-16T06:26:34,096 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=4f48e9ca11073ed9d5885225fa800cb0, ASSIGN; state=OFFLINE, location=3456ee6a3164,37533,1731738278939; forceNewPlan=false, retain=false 2024-11-16T06:26:34,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-16T06:26:34,247 INFO [3456ee6a3164:39381 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-16T06:26:34,247 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=96 updating hbase:meta row=2bf927a7e914904d7d25f15851efde52, regionState=OPENING, regionLocation=3456ee6a3164,40155,1731738278871 2024-11-16T06:26:34,247 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=95 updating hbase:meta row=4f48e9ca11073ed9d5885225fa800cb0, regionState=OPENING, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:26:34,249 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=2bf927a7e914904d7d25f15851efde52, ASSIGN because future has completed 2024-11-16T06:26:34,249 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2bf927a7e914904d7d25f15851efde52, server=3456ee6a3164,40155,1731738278871}] 2024-11-16T06:26:34,250 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=4f48e9ca11073ed9d5885225fa800cb0, ASSIGN because future has completed 2024-11-16T06:26:34,250 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=98, ppid=95, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4f48e9ca11073ed9d5885225fa800cb0, server=3456ee6a3164,37533,1731738278939}] 2024-11-16T06:26:34,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-16T06:26:34,406 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0. 2024-11-16T06:26:34,406 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7752): Opening region: {ENCODED => 4f48e9ca11073ed9d5885225fa800cb0, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0.', STARTKEY => '', ENDKEY => '1'} 2024-11-16T06:26:34,406 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0. service=AccessControlService 2024-11-16T06:26:34,407 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-16T06:26:34,407 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 4f48e9ca11073ed9d5885225fa800cb0 2024-11-16T06:26:34,407 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:26:34,407 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7794): checking encryption for 4f48e9ca11073ed9d5885225fa800cb0 2024-11-16T06:26:34,407 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7797): checking classloading for 4f48e9ca11073ed9d5885225fa800cb0 2024-11-16T06:26:34,409 INFO [StoreOpener-4f48e9ca11073ed9d5885225fa800cb0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4f48e9ca11073ed9d5885225fa800cb0 2024-11-16T06:26:34,411 INFO [StoreOpener-4f48e9ca11073ed9d5885225fa800cb0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4f48e9ca11073ed9d5885225fa800cb0 columnFamilyName cf 2024-11-16T06:26:34,411 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52. 2024-11-16T06:26:34,411 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7752): Opening region: {ENCODED => 2bf927a7e914904d7d25f15851efde52, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52.', STARTKEY => '1', ENDKEY => ''} 2024-11-16T06:26:34,412 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52. service=AccessControlService 2024-11-16T06:26:34,412 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-16T06:26:34,412 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 2bf927a7e914904d7d25f15851efde52 2024-11-16T06:26:34,412 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:26:34,412 DEBUG [StoreOpener-4f48e9ca11073ed9d5885225fa800cb0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:26:34,412 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7794): checking encryption for 2bf927a7e914904d7d25f15851efde52 2024-11-16T06:26:34,412 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7797): checking classloading for 2bf927a7e914904d7d25f15851efde52 2024-11-16T06:26:34,413 INFO [StoreOpener-4f48e9ca11073ed9d5885225fa800cb0-1 {}] regionserver.HStore(327): Store=4f48e9ca11073ed9d5885225fa800cb0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:26:34,413 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1038): replaying wal for 4f48e9ca11073ed9d5885225fa800cb0 2024-11-16T06:26:34,414 INFO [StoreOpener-2bf927a7e914904d7d25f15851efde52-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2bf927a7e914904d7d25f15851efde52 2024-11-16T06:26:34,414 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/4f48e9ca11073ed9d5885225fa800cb0 2024-11-16T06:26:34,414 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/4f48e9ca11073ed9d5885225fa800cb0 2024-11-16T06:26:34,415 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1048): stopping wal replay for 4f48e9ca11073ed9d5885225fa800cb0 2024-11-16T06:26:34,415 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1060): Cleaning up temporary data for 4f48e9ca11073ed9d5885225fa800cb0 2024-11-16T06:26:34,416 INFO [StoreOpener-2bf927a7e914904d7d25f15851efde52-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 
1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2bf927a7e914904d7d25f15851efde52 columnFamilyName cf 2024-11-16T06:26:34,417 DEBUG [StoreOpener-2bf927a7e914904d7d25f15851efde52-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:26:34,417 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1093): writing seq id for 4f48e9ca11073ed9d5885225fa800cb0 2024-11-16T06:26:34,417 INFO [StoreOpener-2bf927a7e914904d7d25f15851efde52-1 {}] regionserver.HStore(327): Store=2bf927a7e914904d7d25f15851efde52/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:26:34,418 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1038): replaying wal for 2bf927a7e914904d7d25f15851efde52 2024-11-16T06:26:34,420 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/2bf927a7e914904d7d25f15851efde52 2024-11-16T06:26:34,420 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/4f48e9ca11073ed9d5885225fa800cb0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:26:34,421 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1114): Opened 4f48e9ca11073ed9d5885225fa800cb0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64928520, jitterRate=-0.032489657402038574}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:26:34,421 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4f48e9ca11073ed9d5885225fa800cb0 2024-11-16T06:26:34,421 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/2bf927a7e914904d7d25f15851efde52 2024-11-16T06:26:34,422 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1048): stopping wal replay for 2bf927a7e914904d7d25f15851efde52 2024-11-16T06:26:34,422 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1060): Cleaning up 
temporary data for 2bf927a7e914904d7d25f15851efde52 2024-11-16T06:26:34,422 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1006): Region open journal for 4f48e9ca11073ed9d5885225fa800cb0: Running coprocessor pre-open hook at 1731738394407Writing region info on filesystem at 1731738394407Initializing all the Stores at 1731738394408 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738394408Cleaning up temporary data from old regions at 1731738394415 (+7 ms)Running coprocessor post-open hooks at 1731738394421 (+6 ms)Region opened successfully at 1731738394422 (+1 ms) 2024-11-16T06:26:34,423 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0., pid=98, masterSystemTime=1731738394402 2024-11-16T06:26:34,424 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1093): writing seq id for 2bf927a7e914904d7d25f15851efde52 2024-11-16T06:26:34,425 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0. 2024-11-16T06:26:34,425 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0. 
2024-11-16T06:26:34,425 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=95 updating hbase:meta row=4f48e9ca11073ed9d5885225fa800cb0, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:26:34,427 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/2bf927a7e914904d7d25f15851efde52/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:26:34,428 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1114): Opened 2bf927a7e914904d7d25f15851efde52; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74864422, jitterRate=0.11556681990623474}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:26:34,428 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2bf927a7e914904d7d25f15851efde52 2024-11-16T06:26:34,428 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1006): Region open journal for 2bf927a7e914904d7d25f15851efde52: Running coprocessor pre-open hook at 1731738394413Writing region info on filesystem at 1731738394413Initializing all the Stores at 1731738394413Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738394413Cleaning up temporary data from old regions at 1731738394422 (+9 ms)Running coprocessor post-open hooks at 1731738394428 (+6 ms)Region opened successfully at 1731738394428 2024-11-16T06:26:34,429 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52., pid=97, masterSystemTime=1731738394401 2024-11-16T06:26:34,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=95, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4f48e9ca11073ed9d5885225fa800cb0, server=3456ee6a3164,37533,1731738278939 because future has completed 2024-11-16T06:26:34,433 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52. 2024-11-16T06:26:34,433 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52. 
2024-11-16T06:26:34,434 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=96 updating hbase:meta row=2bf927a7e914904d7d25f15851efde52, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,40155,1731738278871 2024-11-16T06:26:34,437 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=96, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2bf927a7e914904d7d25f15851efde52, server=3456ee6a3164,40155,1731738278871 because future has completed 2024-11-16T06:26:34,438 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=98, resume processing ppid=95 2024-11-16T06:26:34,438 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=95, state=SUCCESS, hasLock=false; OpenRegionProcedure 4f48e9ca11073ed9d5885225fa800cb0, server=3456ee6a3164,37533,1731738278939 in 183 msec 2024-11-16T06:26:34,446 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=97, resume processing ppid=96 2024-11-16T06:26:34,446 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, ppid=96, state=SUCCESS, hasLock=false; OpenRegionProcedure 2bf927a7e914904d7d25f15851efde52, server=3456ee6a3164,40155,1731738278871 in 189 msec 2024-11-16T06:26:34,446 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, ppid=94, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=4f48e9ca11073ed9d5885225fa800cb0, ASSIGN in 344 msec 2024-11-16T06:26:34,448 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=96, resume processing ppid=94 2024-11-16T06:26:34,448 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, ppid=94, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=2bf927a7e914904d7d25f15851efde52, ASSIGN in 352 msec 2024-11-16T06:26:34,449 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T06:26:34,449 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738394449"}]},"ts":"1731738394449"} 2024-11-16T06:26:34,451 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-11-16T06:26:34,452 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T06:26:34,452 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-11-16T06:26:34,455 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-16T06:26:34,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:34,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:34,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:34,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:34,459 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:34,459 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:34,460 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:34,460 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:34,461 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 397 msec 2024-11-16T06:26:34,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-16T06:26:34,695 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-16T06:26:34,695 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-16T06:26:34,698 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:34,698 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0. 
2024-11-16T06:26:34,698 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T06:26:34,700 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-16T06:26:34,705 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-16T06:26:34,714 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-16T06:26:34,717 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-16T06:26:34,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731738394717 (current time:1731738394717). 2024-11-16T06:26:34,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-16T06:26:34,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-16T06:26:34,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-16T06:26:34,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f74016d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:34,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:26:34,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:26:34,721 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:26:34,721 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:26:34,721 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:26:34,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50b3170f, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:34,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:26:34,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:26:34,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:34,723 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40446, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:26:34,724 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2349d4fa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:34,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:26:34,725 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:26:34,725 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:26:34,726 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39068, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:26:34,728 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 
2024-11-16T06:26:34,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:26:34,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:34,728 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:26:34,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:34,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a36782f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:34,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:26:34,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:26:34,730 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:26:34,730 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:26:34,730 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:26:34,730 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79911ce0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:34,730 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:26:34,731 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:26:34,731 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:34,732 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40468, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:26:34,733 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5715bfef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:34,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:26:34,734 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:26:34,734 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:26:34,735 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39084, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:26:34,738 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338., hostname=3456ee6a3164,35939,1731738278639, seqNum=2] 2024-11-16T06:26:34,739 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:26:34,740 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42858, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:26:34,741 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 
2024-11-16T06:26:34,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:26:34,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:34,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:34,742 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:26:34,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-16T06:26:34,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-16T06:26:34,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=99, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-16T06:26:34,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 99 2024-11-16T06:26:34,745 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-16T06:26:34,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=99 2024-11-16T06:26:34,745 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-16T06:26:34,747 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-16T06:26:34,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742110_1286 (size=215) 2024-11-16T06:26:34,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742110_1286 (size=215) 2024-11-16T06:26:34,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742110_1286 (size=215) 2024-11-16T06:26:34,755 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-16T06:26:34,755 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4f48e9ca11073ed9d5885225fa800cb0}, {pid=101, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2bf927a7e914904d7d25f15851efde52}] 2024-11-16T06:26:34,756 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=101, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2bf927a7e914904d7d25f15851efde52 2024-11-16T06:26:34,756 INFO [PEWorker-4 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=100, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4f48e9ca11073ed9d5885225fa800cb0 2024-11-16T06:26:34,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=99 2024-11-16T06:26:34,908 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=100 2024-11-16T06:26:34,908 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=101 2024-11-16T06:26:34,908 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52. 2024-11-16T06:26:34,908 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0. 2024-11-16T06:26:34,908 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.HRegion(2603): Flush status journal for 2bf927a7e914904d7d25f15851efde52: 2024-11-16T06:26:34,908 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.HRegion(2603): Flush status journal for 4f48e9ca11073ed9d5885225fa800cb0: 2024-11-16T06:26:34,908 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-16T06:26:34,908 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-16T06:26:34,908 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:34,908 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:26:34,908 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:34,908 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-16T06:26:34,908 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:26:34,908 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-16T06:26:34,912 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-11-16T06:26:34,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742111_1287 (size=86) 2024-11-16T06:26:34,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742111_1287 (size=86) 2024-11-16T06:26:34,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742111_1287 (size=86) 2024-11-16T06:26:34,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52. 2024-11-16T06:26:34,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=101 2024-11-16T06:26:34,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=101 2024-11-16T06:26:34,915 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 2bf927a7e914904d7d25f15851efde52 2024-11-16T06:26:34,916 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=101, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2bf927a7e914904d7d25f15851efde52 2024-11-16T06:26:34,917 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, ppid=99, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2bf927a7e914904d7d25f15851efde52 in 161 msec 2024-11-16T06:26:34,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742112_1288 (size=86) 2024-11-16T06:26:34,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742112_1288 (size=86) 2024-11-16T06:26:34,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742112_1288 (size=86) 2024-11-16T06:26:34,920 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0. 
2024-11-16T06:26:34,920 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=100 2024-11-16T06:26:34,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=100 2024-11-16T06:26:34,921 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 4f48e9ca11073ed9d5885225fa800cb0 2024-11-16T06:26:34,921 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=100, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4f48e9ca11073ed9d5885225fa800cb0 2024-11-16T06:26:34,924 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=100, resume processing ppid=99 2024-11-16T06:26:34,924 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-16T06:26:34,924 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=99, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4f48e9ca11073ed9d5885225fa800cb0 in 167 msec 2024-11-16T06:26:34,924 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-16T06:26:34,925 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-16T06:26:34,925 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-16T06:26:34,925 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:26:34,926 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-16T06:26:34,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742113_1289 (size=78) 2024-11-16T06:26:34,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742113_1289 (size=78) 2024-11-16T06:26:34,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742113_1289 (size=78) 2024-11-16T06:26:34,932 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-16T06:26:34,932 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:34,933 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:34,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742114_1290 (size=713) 2024-11-16T06:26:34,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742114_1290 (size=713) 2024-11-16T06:26:34,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742114_1290 (size=713) 2024-11-16T06:26:34,947 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-16T06:26:34,952 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-16T06:26:34,953 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:34,954 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-16T06:26:34,954 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 99 2024-11-16T06:26:34,956 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 211 msec 2024-11-16T06:26:35,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=99 2024-11-16T06:26:35,066 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-16T06:26:35,075 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37533 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0. with WAL disabled. Data may be lost in the event of a crash. 2024-11-16T06:26:35,077 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40155 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52. with WAL disabled. Data may be lost in the event of a crash. 2024-11-16T06:26:35,079 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-16T06:26:35,081 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:35,081 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0. 
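[Editor's illustrative aside, not part of the captured log.] The "Found 2 regions for table ..." entries above come from the test harness scanning hbase:meta before the second snapshot is taken. A comparable check in client code, sketched here with only the table name taken from the log and everything else assumed, can use Admin.getRegions:

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class RegionCountSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // The log above reports two regions for this table at this point in the test.
          List<RegionInfo> regions =
              admin.getRegions(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
          System.out.println("regions=" + regions.size());
        }
      }
    }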
2024-11-16T06:26:35,082 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T06:26:35,084 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-16T06:26:35,090 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-16T06:26:35,099 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-16T06:26:35,102 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-16T06:26:35,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731738395102 (current time:1731738395102). 2024-11-16T06:26:35,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-16T06:26:35,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-16T06:26:35,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-16T06:26:35,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3375c677, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:35,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:26:35,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:26:35,103 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:26:35,103 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:26:35,103 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:26:35,103 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f865e1a, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:35,103 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:26:35,104 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:26:35,104 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:35,104 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40486, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:26:35,105 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40c3a3fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:35,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:26:35,106 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:26:35,106 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:26:35,107 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39100, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:26:35,109 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 
2024-11-16T06:26:35,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:26:35,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:35,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:35,109 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:26:35,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a16462, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:35,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:26:35,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:26:35,110 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:26:35,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:26:35,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:26:35,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c995d39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:35,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:26:35,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:26:35,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:35,112 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40498, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:26:35,113 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@375b5e62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:35,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:26:35,114 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:26:35,114 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:26:35,115 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39112, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:26:35,117 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338., hostname=3456ee6a3164,35939,1731738278639, seqNum=2] 2024-11-16T06:26:35,117 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:26:35,118 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42868, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:26:35,119 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 
2024-11-16T06:26:35,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:26:35,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:35,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:35,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-16T06:26:35,120 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:26:35,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-16T06:26:35,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=102, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-16T06:26:35,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 102 2024-11-16T06:26:35,123 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-16T06:26:35,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-16T06:26:35,124 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-16T06:26:35,126 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-16T06:26:35,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742115_1291 (size=210) 2024-11-16T06:26:35,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742115_1291 (size=210) 2024-11-16T06:26:35,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742115_1291 (size=210) 2024-11-16T06:26:35,137 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-16T06:26:35,137 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4f48e9ca11073ed9d5885225fa800cb0}, {pid=104, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2bf927a7e914904d7d25f15851efde52}] 2024-11-16T06:26:35,138 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=104, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2bf927a7e914904d7d25f15851efde52 2024-11-16T06:26:35,138 INFO [PEWorker-1 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4f48e9ca11073ed9d5885225fa800cb0 2024-11-16T06:26:35,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-16T06:26:35,290 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=104 2024-11-16T06:26:35,290 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-11-16T06:26:35,290 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52. 2024-11-16T06:26:35,290 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0. 2024-11-16T06:26:35,290 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2902): Flushing 4f48e9ca11073ed9d5885225fa800cb0 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-16T06:26:35,290 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegion(2902): Flushing 2bf927a7e914904d7d25f15851efde52 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-16T06:26:35,308 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411168f9435fbddf3459c8bdd74f08d13fe57_2bf927a7e914904d7d25f15851efde52 is 71, key is 12951143488b0a33f996aed14c45b7db/cf:q/1731738395077/Put/seqid=0 2024-11-16T06:26:35,312 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241116cb85619b713f45ddb2f0bee7b9a526ef_4f48e9ca11073ed9d5885225fa800cb0 is 71, key is 0912ae2f49d0496a01efde67d7f8c0e0/cf:q/1731738395075/Put/seqid=0 2024-11-16T06:26:35,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742116_1292 (size=8171) 2024-11-16T06:26:35,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742116_1292 (size=8171) 2024-11-16T06:26:35,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742116_1292 (size=8171) 2024-11-16T06:26:35,314 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:26:35,320 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411168f9435fbddf3459c8bdd74f08d13fe57_2bf927a7e914904d7d25f15851efde52 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202411168f9435fbddf3459c8bdd74f08d13fe57_2bf927a7e914904d7d25f15851efde52 2024-11-16T06:26:35,321 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/2bf927a7e914904d7d25f15851efde52/.tmp/cf/2d959ee09eed454daf5378ad93a478d1, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=2bf927a7e914904d7d25f15851efde52] 2024-11-16T06:26:35,321 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/2bf927a7e914904d7d25f15851efde52/.tmp/cf/2d959ee09eed454daf5378ad93a478d1 is 224, key is 130f15bd58cb385e6550863c8a4eb3cc7/cf:q/1731738395077/Put/seqid=0 2024-11-16T06:26:35,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742117_1293 (size=5102) 2024-11-16T06:26:35,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742117_1293 (size=5102) 2024-11-16T06:26:35,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742117_1293 (size=5102) 2024-11-16T06:26:35,324 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:26:35,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742118_1294 (size=15717) 2024-11-16T06:26:35,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742118_1294 (size=15717) 2024-11-16T06:26:35,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742118_1294 (size=15717) 2024-11-16T06:26:35,327 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/2bf927a7e914904d7d25f15851efde52/.tmp/cf/2d959ee09eed454daf5378ad93a478d1 2024-11-16T06:26:35,329 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241116cb85619b713f45ddb2f0bee7b9a526ef_4f48e9ca11073ed9d5885225fa800cb0 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241116cb85619b713f45ddb2f0bee7b9a526ef_4f48e9ca11073ed9d5885225fa800cb0 2024-11-16T06:26:35,330 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/4f48e9ca11073ed9d5885225fa800cb0/.tmp/cf/c701f41d6b16469c92f0e092fd089bb3, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=4f48e9ca11073ed9d5885225fa800cb0] 2024-11-16T06:26:35,330 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/4f48e9ca11073ed9d5885225fa800cb0/.tmp/cf/c701f41d6b16469c92f0e092fd089bb3 is 224, key is 0563e0832f9711c441188ab34624a9b15/cf:q/1731738395075/Put/seqid=0 2024-11-16T06:26:35,332 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/2bf927a7e914904d7d25f15851efde52/.tmp/cf/2d959ee09eed454daf5378ad93a478d1 as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/2bf927a7e914904d7d25f15851efde52/cf/2d959ee09eed454daf5378ad93a478d1 2024-11-16T06:26:35,337 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/2bf927a7e914904d7d25f15851efde52/cf/2d959ee09eed454daf5378ad93a478d1, entries=47, sequenceid=6, filesize=15.3 K 2024-11-16T06:26:35,338 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 2bf927a7e914904d7d25f15851efde52 in 48ms, sequenceid=6, compaction requested=false 2024-11-16T06:26:35,339 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegion(2603): Flush status journal for 2bf927a7e914904d7d25f15851efde52: 2024-11-16T06:26:35,339 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-16T06:26:35,339 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:35,339 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:26:35,339 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/2bf927a7e914904d7d25f15851efde52/cf/2d959ee09eed454daf5378ad93a478d1] hfiles 2024-11-16T06:26:35,339 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/2bf927a7e914904d7d25f15851efde52/cf/2d959ee09eed454daf5378ad93a478d1 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:35,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742119_1295 (size=5978) 2024-11-16T06:26:35,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742119_1295 (size=5978) 2024-11-16T06:26:35,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742119_1295 (size=5978) 2024-11-16T06:26:35,343 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/4f48e9ca11073ed9d5885225fa800cb0/.tmp/cf/c701f41d6b16469c92f0e092fd089bb3 2024-11-16T06:26:35,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742120_1296 (size=125) 2024-11-16T06:26:35,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742120_1296 (size=125) 2024-11-16T06:26:35,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742120_1296 (size=125) 2024-11-16T06:26:35,347 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52. 
2024-11-16T06:26:35,347 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=104 2024-11-16T06:26:35,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=104 2024-11-16T06:26:35,348 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 2bf927a7e914904d7d25f15851efde52 2024-11-16T06:26:35,348 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=104, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2bf927a7e914904d7d25f15851efde52 2024-11-16T06:26:35,350 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, ppid=102, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2bf927a7e914904d7d25f15851efde52 in 211 msec 2024-11-16T06:26:35,351 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/4f48e9ca11073ed9d5885225fa800cb0/.tmp/cf/c701f41d6b16469c92f0e092fd089bb3 as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/4f48e9ca11073ed9d5885225fa800cb0/cf/c701f41d6b16469c92f0e092fd089bb3 2024-11-16T06:26:35,356 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/4f48e9ca11073ed9d5885225fa800cb0/cf/c701f41d6b16469c92f0e092fd089bb3, entries=3, sequenceid=6, filesize=5.8 K 2024-11-16T06:26:35,357 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 4f48e9ca11073ed9d5885225fa800cb0 in 67ms, sequenceid=6, compaction requested=false 2024-11-16T06:26:35,357 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for 4f48e9ca11073ed9d5885225fa800cb0: 2024-11-16T06:26:35,357 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-16T06:26:35,357 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:35,357 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:26:35,357 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/4f48e9ca11073ed9d5885225fa800cb0/cf/c701f41d6b16469c92f0e092fd089bb3] hfiles 2024-11-16T06:26:35,357 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/4f48e9ca11073ed9d5885225fa800cb0/cf/c701f41d6b16469c92f0e092fd089bb3 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:35,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742121_1297 (size=125) 2024-11-16T06:26:35,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742121_1297 (size=125) 2024-11-16T06:26:35,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742121_1297 (size=125) 2024-11-16T06:26:35,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0. 
2024-11-16T06:26:35,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-11-16T06:26:35,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-11-16T06:26:35,366 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 4f48e9ca11073ed9d5885225fa800cb0 2024-11-16T06:26:35,366 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4f48e9ca11073ed9d5885225fa800cb0 2024-11-16T06:26:35,369 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=103, resume processing ppid=102 2024-11-16T06:26:35,369 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-16T06:26:35,369 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=102, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4f48e9ca11073ed9d5885225fa800cb0 in 230 msec 2024-11-16T06:26:35,370 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-16T06:26:35,371 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-16T06:26:35,371 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-16T06:26:35,371 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:26:35,372 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202411168f9435fbddf3459c8bdd74f08d13fe57_2bf927a7e914904d7d25f15851efde52, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241116cb85619b713f45ddb2f0bee7b9a526ef_4f48e9ca11073ed9d5885225fa800cb0] hfiles 2024-11-16T06:26:35,372 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202411168f9435fbddf3459c8bdd74f08d13fe57_2bf927a7e914904d7d25f15851efde52 2024-11-16T06:26:35,372 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241116cb85619b713f45ddb2f0bee7b9a526ef_4f48e9ca11073ed9d5885225fa800cb0 2024-11-16T06:26:35,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742122_1298 (size=309) 2024-11-16T06:26:35,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742122_1298 (size=309) 2024-11-16T06:26:35,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742122_1298 (size=309) 2024-11-16T06:26:35,379 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-16T06:26:35,380 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:35,380 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:35,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742123_1299 (size=1023) 2024-11-16T06:26:35,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:40907 is added to blk_1073742123_1299 (size=1023) 2024-11-16T06:26:35,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742123_1299 (size=1023) 2024-11-16T06:26:35,394 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-16T06:26:35,399 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-16T06:26:35,400 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:35,401 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-16T06:26:35,401 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 102 2024-11-16T06:26:35,402 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 280 msec 2024-11-16T06:26:35,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-16T06:26:35,444 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-16T06:26:35,461 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T06:26:35,462 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T06:26:35,463 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T06:26:35,463 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42882, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T06:26:35,463 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39122, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T06:26:35,464 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40172, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T06:26:35,464 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40155 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-16T06:26:35,464 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37533 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-16T06:26:35,464 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35939 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-16T06:26:35,466 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T06:26:35,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=105, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:35,468 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T06:26:35,468 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:26:35,468 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 105 2024-11-16T06:26:35,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-16T06:26:35,469 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T06:26:35,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742124_1300 (size=399) 2024-11-16T06:26:35,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742124_1300 (size=399) 2024-11-16T06:26:35,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742124_1300 
(size=399) 2024-11-16T06:26:35,479 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b506bdc55c121092a6b415b5bcd45705, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395466.b506bdc55c121092a6b415b5bcd45705.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:26:35,479 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1ffa12be9f21008004857405922eb16f, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1731738395466.1ffa12be9f21008004857405922eb16f.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:26:35,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742125_1301 (size=85) 2024-11-16T06:26:35,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742125_1301 (size=85) 2024-11-16T06:26:35,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742125_1301 (size=85) 2024-11-16T06:26:35,499 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395466.b506bdc55c121092a6b415b5bcd45705.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:26:35,500 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing b506bdc55c121092a6b415b5bcd45705, disabling compactions & flushes 2024-11-16T06:26:35,500 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395466.b506bdc55c121092a6b415b5bcd45705. 2024-11-16T06:26:35,500 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395466.b506bdc55c121092a6b415b5bcd45705. 
2024-11-16T06:26:35,500 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395466.b506bdc55c121092a6b415b5bcd45705. after waiting 0 ms 2024-11-16T06:26:35,500 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395466.b506bdc55c121092a6b415b5bcd45705. 2024-11-16T06:26:35,500 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395466.b506bdc55c121092a6b415b5bcd45705. 2024-11-16T06:26:35,500 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for b506bdc55c121092a6b415b5bcd45705: Waiting for close lock at 1731738395500Disabling compacts and flushes for region at 1731738395500Disabling writes for close at 1731738395500Writing region close event to WAL at 1731738395500Closed at 1731738395500 2024-11-16T06:26:35,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742126_1302 (size=85) 2024-11-16T06:26:35,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742126_1302 (size=85) 2024-11-16T06:26:35,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742126_1302 (size=85) 2024-11-16T06:26:35,503 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1731738395466.1ffa12be9f21008004857405922eb16f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:26:35,503 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing 1ffa12be9f21008004857405922eb16f, disabling compactions & flushes 2024-11-16T06:26:35,503 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1731738395466.1ffa12be9f21008004857405922eb16f. 2024-11-16T06:26:35,503 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1731738395466.1ffa12be9f21008004857405922eb16f. 2024-11-16T06:26:35,503 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1731738395466.1ffa12be9f21008004857405922eb16f. after waiting 0 ms 2024-11-16T06:26:35,503 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1731738395466.1ffa12be9f21008004857405922eb16f. 
2024-11-16T06:26:35,503 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1731738395466.1ffa12be9f21008004857405922eb16f. 2024-11-16T06:26:35,503 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for 1ffa12be9f21008004857405922eb16f: Waiting for close lock at 1731738395503Disabling compacts and flushes for region at 1731738395503Disabling writes for close at 1731738395503Writing region close event to WAL at 1731738395503Closed at 1731738395503 2024-11-16T06:26:35,504 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T06:26:35,505 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395466.b506bdc55c121092a6b415b5bcd45705.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1731738395504"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731738395504"}]},"ts":"1731738395504"} 2024-11-16T06:26:35,505 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1731738395466.1ffa12be9f21008004857405922eb16f.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1731738395504"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731738395504"}]},"ts":"1731738395504"} 2024-11-16T06:26:35,507 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-11-16T06:26:35,508 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T06:26:35,508 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738395508"}]},"ts":"1731738395508"} 2024-11-16T06:26:35,510 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-11-16T06:26:35,510 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {3456ee6a3164=0} racks are {/default-rack=0} 2024-11-16T06:26:35,511 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-16T06:26:35,511 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-16T06:26:35,511 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-16T06:26:35,511 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-16T06:26:35,511 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-16T06:26:35,511 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-16T06:26:35,511 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-16T06:26:35,511 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-16T06:26:35,511 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-16T06:26:35,511 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-16T06:26:35,512 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b506bdc55c121092a6b415b5bcd45705, ASSIGN}, {pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=1ffa12be9f21008004857405922eb16f, ASSIGN}] 2024-11-16T06:26:35,513 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b506bdc55c121092a6b415b5bcd45705, ASSIGN 2024-11-16T06:26:35,513 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=1ffa12be9f21008004857405922eb16f, ASSIGN 2024-11-16T06:26:35,513 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b506bdc55c121092a6b415b5bcd45705, ASSIGN; state=OFFLINE, location=3456ee6a3164,37533,1731738278939; forceNewPlan=false, 
retain=false 2024-11-16T06:26:35,514 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=1ffa12be9f21008004857405922eb16f, ASSIGN; state=OFFLINE, location=3456ee6a3164,35939,1731738278639; forceNewPlan=false, retain=false 2024-11-16T06:26:35,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-16T06:26:35,664 INFO [3456ee6a3164:39381 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-16T06:26:35,664 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=107 updating hbase:meta row=1ffa12be9f21008004857405922eb16f, regionState=OPENING, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:26:35,665 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=106 updating hbase:meta row=b506bdc55c121092a6b415b5bcd45705, regionState=OPENING, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:26:35,667 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=1ffa12be9f21008004857405922eb16f, ASSIGN because future has completed 2024-11-16T06:26:35,668 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1ffa12be9f21008004857405922eb16f, server=3456ee6a3164,35939,1731738278639}] 2024-11-16T06:26:35,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b506bdc55c121092a6b415b5bcd45705, ASSIGN because future has completed 2024-11-16T06:26:35,669 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=106, state=RUNNABLE, hasLock=false; OpenRegionProcedure b506bdc55c121092a6b415b5bcd45705, server=3456ee6a3164,37533,1731738278939}] 2024-11-16T06:26:35,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-16T06:26:35,824 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1731738395466.1ffa12be9f21008004857405922eb16f. 2024-11-16T06:26:35,824 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7752): Opening region: {ENCODED => 1ffa12be9f21008004857405922eb16f, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1731738395466.1ffa12be9f21008004857405922eb16f.', STARTKEY => '2', ENDKEY => ''} 2024-11-16T06:26:35,825 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1731738395466.1ffa12be9f21008004857405922eb16f. 
service=AccessControlService 2024-11-16T06:26:35,825 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-16T06:26:35,825 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 1ffa12be9f21008004857405922eb16f 2024-11-16T06:26:35,825 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1731738395466.1ffa12be9f21008004857405922eb16f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:26:35,825 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7794): checking encryption for 1ffa12be9f21008004857405922eb16f 2024-11-16T06:26:35,825 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7797): checking classloading for 1ffa12be9f21008004857405922eb16f 2024-11-16T06:26:35,827 INFO [StoreOpener-1ffa12be9f21008004857405922eb16f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1ffa12be9f21008004857405922eb16f 2024-11-16T06:26:35,827 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395466.b506bdc55c121092a6b415b5bcd45705. 2024-11-16T06:26:35,827 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7752): Opening region: {ENCODED => b506bdc55c121092a6b415b5bcd45705, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395466.b506bdc55c121092a6b415b5bcd45705.', STARTKEY => '', ENDKEY => '2'} 2024-11-16T06:26:35,827 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395466.b506bdc55c121092a6b415b5bcd45705. service=AccessControlService 2024-11-16T06:26:35,828 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-16T06:26:35,828 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 b506bdc55c121092a6b415b5bcd45705 2024-11-16T06:26:35,828 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395466.b506bdc55c121092a6b415b5bcd45705.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:26:35,828 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7794): checking encryption for b506bdc55c121092a6b415b5bcd45705 2024-11-16T06:26:35,828 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7797): checking classloading for b506bdc55c121092a6b415b5bcd45705 2024-11-16T06:26:35,828 INFO [StoreOpener-1ffa12be9f21008004857405922eb16f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1ffa12be9f21008004857405922eb16f columnFamilyName cf 2024-11-16T06:26:35,828 DEBUG [StoreOpener-1ffa12be9f21008004857405922eb16f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:26:35,829 INFO [StoreOpener-1ffa12be9f21008004857405922eb16f-1 {}] regionserver.HStore(327): Store=1ffa12be9f21008004857405922eb16f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:26:35,829 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1038): replaying wal for 1ffa12be9f21008004857405922eb16f 2024-11-16T06:26:35,829 INFO [StoreOpener-b506bdc55c121092a6b415b5bcd45705-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b506bdc55c121092a6b415b5bcd45705 2024-11-16T06:26:35,830 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1ffa12be9f21008004857405922eb16f 2024-11-16T06:26:35,830 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1ffa12be9f21008004857405922eb16f 2024-11-16T06:26:35,830 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1048): stopping wal replay for 1ffa12be9f21008004857405922eb16f 2024-11-16T06:26:35,830 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1060): Cleaning up temporary data for 1ffa12be9f21008004857405922eb16f 2024-11-16T06:26:35,831 INFO [StoreOpener-b506bdc55c121092a6b415b5bcd45705-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b506bdc55c121092a6b415b5bcd45705 columnFamilyName cf 2024-11-16T06:26:35,831 DEBUG [StoreOpener-b506bdc55c121092a6b415b5bcd45705-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:26:35,832 INFO [StoreOpener-b506bdc55c121092a6b415b5bcd45705-1 {}] regionserver.HStore(327): Store=b506bdc55c121092a6b415b5bcd45705/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:26:35,832 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1038): replaying wal for b506bdc55c121092a6b415b5bcd45705 2024-11-16T06:26:35,832 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1093): writing seq id for 1ffa12be9f21008004857405922eb16f 2024-11-16T06:26:35,833 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b506bdc55c121092a6b415b5bcd45705 2024-11-16T06:26:35,833 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b506bdc55c121092a6b415b5bcd45705 2024-11-16T06:26:35,833 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1048): stopping wal replay for b506bdc55c121092a6b415b5bcd45705 2024-11-16T06:26:35,834 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1060): Cleaning up temporary data for b506bdc55c121092a6b415b5bcd45705 2024-11-16T06:26:35,834 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] 
wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1ffa12be9f21008004857405922eb16f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:26:35,834 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1114): Opened 1ffa12be9f21008004857405922eb16f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73006701, jitterRate=0.08788461983203888}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:26:35,834 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1ffa12be9f21008004857405922eb16f 2024-11-16T06:26:35,835 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1006): Region open journal for 1ffa12be9f21008004857405922eb16f: Running coprocessor pre-open hook at 1731738395826Writing region info on filesystem at 1731738395826Initializing all the Stores at 1731738395826Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738395826Cleaning up temporary data from old regions at 1731738395830 (+4 ms)Running coprocessor post-open hooks at 1731738395834 (+4 ms)Region opened successfully at 1731738395835 (+1 ms) 2024-11-16T06:26:35,835 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1093): writing seq id for b506bdc55c121092a6b415b5bcd45705 2024-11-16T06:26:35,836 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1731738395466.1ffa12be9f21008004857405922eb16f., pid=108, masterSystemTime=1731738395821 2024-11-16T06:26:35,837 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b506bdc55c121092a6b415b5bcd45705/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:26:35,838 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1114): Opened b506bdc55c121092a6b415b5bcd45705; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71446835, jitterRate=0.06464080512523651}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:26:35,838 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b506bdc55c121092a6b415b5bcd45705 2024-11-16T06:26:35,838 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1006): Region open journal for b506bdc55c121092a6b415b5bcd45705: 
Running coprocessor pre-open hook at 1731738395828Writing region info on filesystem at 1731738395828Initializing all the Stores at 1731738395829 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738395829Cleaning up temporary data from old regions at 1731738395834 (+5 ms)Running coprocessor post-open hooks at 1731738395838 (+4 ms)Region opened successfully at 1731738395838 2024-11-16T06:26:35,838 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1731738395466.1ffa12be9f21008004857405922eb16f. 2024-11-16T06:26:35,838 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1731738395466.1ffa12be9f21008004857405922eb16f. 2024-11-16T06:26:35,839 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395466.b506bdc55c121092a6b415b5bcd45705., pid=109, masterSystemTime=1731738395824 2024-11-16T06:26:35,839 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=107 updating hbase:meta row=1ffa12be9f21008004857405922eb16f, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:26:35,840 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395466.b506bdc55c121092a6b415b5bcd45705. 2024-11-16T06:26:35,840 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395466.b506bdc55c121092a6b415b5bcd45705. 
2024-11-16T06:26:35,841 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=108, ppid=107, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1ffa12be9f21008004857405922eb16f, server=3456ee6a3164,35939,1731738278639 because future has completed 2024-11-16T06:26:35,841 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=106 updating hbase:meta row=b506bdc55c121092a6b415b5bcd45705, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:26:35,843 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=106, state=RUNNABLE, hasLock=false; OpenRegionProcedure b506bdc55c121092a6b415b5bcd45705, server=3456ee6a3164,37533,1731738278939 because future has completed 2024-11-16T06:26:35,844 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=107 2024-11-16T06:26:35,845 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=107, state=SUCCESS, hasLock=false; OpenRegionProcedure 1ffa12be9f21008004857405922eb16f, server=3456ee6a3164,35939,1731738278639 in 175 msec 2024-11-16T06:26:35,846 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=109, resume processing ppid=106 2024-11-16T06:26:35,846 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, ppid=105, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=1ffa12be9f21008004857405922eb16f, ASSIGN in 334 msec 2024-11-16T06:26:35,847 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=106, state=SUCCESS, hasLock=false; OpenRegionProcedure b506bdc55c121092a6b415b5bcd45705, server=3456ee6a3164,37533,1731738278939 in 176 msec 2024-11-16T06:26:35,848 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=106, resume processing ppid=105 2024-11-16T06:26:35,848 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=105, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b506bdc55c121092a6b415b5bcd45705, ASSIGN in 336 msec 2024-11-16T06:26:35,849 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T06:26:35,849 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738395849"}]},"ts":"1731738395849"} 2024-11-16T06:26:35,851 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-11-16T06:26:35,852 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T06:26:35,852 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-11-16T06:26:35,856 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-16T06:26:35,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:35,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:35,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:35,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:35,859 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:35,859 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:35,859 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:35,859 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:35,860 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:35,860 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:35,860 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:35,860 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:35,861 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 393 msec 2024-11-16T06:26:36,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-16T06:26:36,095 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-16T06:26:36,097 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395466.b506bdc55c121092a6b415b5bcd45705., hostname=3456ee6a3164,37533,1731738278939, seqNum=2] 2024-11-16T06:26:36,103 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1731738395466.1ffa12be9f21008004857405922eb16f., hostname=3456ee6a3164,35939,1731738278639, seqNum=2] 2024-11-16T06:26:36,106 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-11-16T06:26:36,122 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$2(2278): Client=jenkins//172.17.0.2 merge regions [b506bdc55c121092a6b415b5bcd45705, 1ffa12be9f21008004857405922eb16f] 2024-11-16T06:26:36,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[b506bdc55c121092a6b415b5bcd45705, 1ffa12be9f21008004857405922eb16f], force=true 2024-11-16T06:26:36,129 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[b506bdc55c121092a6b415b5bcd45705, 1ffa12be9f21008004857405922eb16f], force=true 2024-11-16T06:26:36,129 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[b506bdc55c121092a6b415b5bcd45705, 1ffa12be9f21008004857405922eb16f], force=true 2024-11-16T06:26:36,129 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[b506bdc55c121092a6b415b5bcd45705, 1ffa12be9f21008004857405922eb16f], 
force=true 2024-11-16T06:26:36,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-16T06:26:36,143 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b506bdc55c121092a6b415b5bcd45705, UNASSIGN}, {pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=1ffa12be9f21008004857405922eb16f, UNASSIGN}] 2024-11-16T06:26:36,144 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=1ffa12be9f21008004857405922eb16f, UNASSIGN 2024-11-16T06:26:36,144 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b506bdc55c121092a6b415b5bcd45705, UNASSIGN 2024-11-16T06:26:36,145 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=111 updating hbase:meta row=b506bdc55c121092a6b415b5bcd45705, regionState=CLOSING, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:26:36,145 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=112 updating hbase:meta row=1ffa12be9f21008004857405922eb16f, regionState=CLOSING, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:26:36,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=1ffa12be9f21008004857405922eb16f, UNASSIGN because future has completed 2024-11-16T06:26:36,147 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-16T06:26:36,148 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1ffa12be9f21008004857405922eb16f, server=3456ee6a3164,35939,1731738278639}] 2024-11-16T06:26:36,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b506bdc55c121092a6b415b5bcd45705, UNASSIGN because future has completed 2024-11-16T06:26:36,149 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-16T06:26:36,149 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=114, ppid=111, state=RUNNABLE, hasLock=false; CloseRegionProcedure b506bdc55c121092a6b415b5bcd45705, server=3456ee6a3164,37533,1731738278939}] 2024-11-16T06:26:36,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done 
pid=110 2024-11-16T06:26:36,301 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(122): Close 1ffa12be9f21008004857405922eb16f 2024-11-16T06:26:36,301 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-16T06:26:36,301 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1722): Closing 1ffa12be9f21008004857405922eb16f, disabling compactions & flushes 2024-11-16T06:26:36,301 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1731738395466.1ffa12be9f21008004857405922eb16f. 2024-11-16T06:26:36,301 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1731738395466.1ffa12be9f21008004857405922eb16f. 2024-11-16T06:26:36,301 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1731738395466.1ffa12be9f21008004857405922eb16f. after waiting 0 ms 2024-11-16T06:26:36,301 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1731738395466.1ffa12be9f21008004857405922eb16f. 2024-11-16T06:26:36,302 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(2902): Flushing 1ffa12be9f21008004857405922eb16f 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-16T06:26:36,302 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(122): Close b506bdc55c121092a6b415b5bcd45705 2024-11-16T06:26:36,302 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-16T06:26:36,302 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1722): Closing b506bdc55c121092a6b415b5bcd45705, disabling compactions & flushes 2024-11-16T06:26:36,302 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395466.b506bdc55c121092a6b415b5bcd45705. 2024-11-16T06:26:36,302 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395466.b506bdc55c121092a6b415b5bcd45705. 2024-11-16T06:26:36,302 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395466.b506bdc55c121092a6b415b5bcd45705. 
after waiting 0 ms 2024-11-16T06:26:36,302 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395466.b506bdc55c121092a6b415b5bcd45705. 2024-11-16T06:26:36,302 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(2902): Flushing b506bdc55c121092a6b415b5bcd45705 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-16T06:26:36,319 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1ffa12be9f21008004857405922eb16f/.tmp/cf/2ed21bf1418c4c93aab160434fbde875 is 28, key is 2/cf:/1731738396104/Put/seqid=0 2024-11-16T06:26:36,319 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b506bdc55c121092a6b415b5bcd45705/.tmp/cf/05463b20d3b64680ae2aa45746729492 is 28, key is 1/cf:/1731738396099/Put/seqid=0 2024-11-16T06:26:36,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742127_1303 (size=4945) 2024-11-16T06:26:36,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742127_1303 (size=4945) 2024-11-16T06:26:36,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742127_1303 (size=4945) 2024-11-16T06:26:36,331 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b506bdc55c121092a6b415b5bcd45705/.tmp/cf/05463b20d3b64680ae2aa45746729492 2024-11-16T06:26:36,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742128_1304 (size=4945) 2024-11-16T06:26:36,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742128_1304 (size=4945) 2024-11-16T06:26:36,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742128_1304 (size=4945) 2024-11-16T06:26:36,336 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1ffa12be9f21008004857405922eb16f/.tmp/cf/2ed21bf1418c4c93aab160434fbde875 2024-11-16T06:26:36,336 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b506bdc55c121092a6b415b5bcd45705/.tmp/cf/05463b20d3b64680ae2aa45746729492 as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b506bdc55c121092a6b415b5bcd45705/cf/05463b20d3b64680ae2aa45746729492 2024-11-16T06:26:36,340 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1ffa12be9f21008004857405922eb16f/.tmp/cf/2ed21bf1418c4c93aab160434fbde875 as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1ffa12be9f21008004857405922eb16f/cf/2ed21bf1418c4c93aab160434fbde875 2024-11-16T06:26:36,341 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b506bdc55c121092a6b415b5bcd45705/cf/05463b20d3b64680ae2aa45746729492, entries=1, sequenceid=5, filesize=4.8 K 2024-11-16T06:26:36,341 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for b506bdc55c121092a6b415b5bcd45705 in 39ms, sequenceid=5, compaction requested=false 2024-11-16T06:26:36,342 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-11-16T06:26:36,346 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1ffa12be9f21008004857405922eb16f/cf/2ed21bf1418c4c93aab160434fbde875, entries=1, sequenceid=5, filesize=4.8 K 2024-11-16T06:26:36,346 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b506bdc55c121092a6b415b5bcd45705/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-16T06:26:36,346 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:26:36,346 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395466.b506bdc55c121092a6b415b5bcd45705. 
2024-11-16T06:26:36,347 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 1ffa12be9f21008004857405922eb16f in 45ms, sequenceid=5, compaction requested=false 2024-11-16T06:26:36,347 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1676): Region close journal for b506bdc55c121092a6b415b5bcd45705: Waiting for close lock at 1731738396302Running coprocessor pre-close hooks at 1731738396302Disabling compacts and flushes for region at 1731738396302Disabling writes for close at 1731738396302Obtaining lock to block concurrent updates at 1731738396302Preparing flush snapshotting stores in b506bdc55c121092a6b415b5bcd45705 at 1731738396302Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395466.b506bdc55c121092a6b415b5bcd45705., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1731738396302Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395466.b506bdc55c121092a6b415b5bcd45705. at 1731738396303 (+1 ms)Flushing b506bdc55c121092a6b415b5bcd45705/cf: creating writer at 1731738396303Flushing b506bdc55c121092a6b415b5bcd45705/cf: appending metadata at 1731738396318 (+15 ms)Flushing b506bdc55c121092a6b415b5bcd45705/cf: closing flushed file at 1731738396318Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@66aa7d50: reopening flushed file at 1731738396335 (+17 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for b506bdc55c121092a6b415b5bcd45705 in 39ms, sequenceid=5, compaction requested=false at 1731738396341 (+6 ms)Writing region close event to WAL at 1731738396343 (+2 ms)Running coprocessor post-close hooks at 1731738396346 (+3 ms)Closed at 1731738396346 2024-11-16T06:26:36,348 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(157): Closed b506bdc55c121092a6b415b5bcd45705 2024-11-16T06:26:36,349 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=111 updating hbase:meta row=b506bdc55c121092a6b415b5bcd45705, regionState=CLOSED 2024-11-16T06:26:36,350 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1ffa12be9f21008004857405922eb16f/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-16T06:26:36,351 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=114, ppid=111, state=RUNNABLE, hasLock=false; CloseRegionProcedure b506bdc55c121092a6b415b5bcd45705, server=3456ee6a3164,37533,1731738278939 because future has completed 2024-11-16T06:26:36,351 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:26:36,351 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1731738395466.1ffa12be9f21008004857405922eb16f. 
2024-11-16T06:26:36,351 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1676): Region close journal for 1ffa12be9f21008004857405922eb16f: Waiting for close lock at 1731738396301Running coprocessor pre-close hooks at 1731738396301Disabling compacts and flushes for region at 1731738396301Disabling writes for close at 1731738396301Obtaining lock to block concurrent updates at 1731738396302 (+1 ms)Preparing flush snapshotting stores in 1ffa12be9f21008004857405922eb16f at 1731738396302Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1731738395466.1ffa12be9f21008004857405922eb16f., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1731738396302Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1731738395466.1ffa12be9f21008004857405922eb16f. at 1731738396302Flushing 1ffa12be9f21008004857405922eb16f/cf: creating writer at 1731738396303 (+1 ms)Flushing 1ffa12be9f21008004857405922eb16f/cf: appending metadata at 1731738396318 (+15 ms)Flushing 1ffa12be9f21008004857405922eb16f/cf: closing flushed file at 1731738396318Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6d5ebc2e: reopening flushed file at 1731738396340 (+22 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 1ffa12be9f21008004857405922eb16f in 45ms, sequenceid=5, compaction requested=false at 1731738396347 (+7 ms)Writing region close event to WAL at 1731738396347Running coprocessor post-close hooks at 1731738396351 (+4 ms)Closed at 1731738396351 2024-11-16T06:26:36,353 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(157): Closed 1ffa12be9f21008004857405922eb16f 2024-11-16T06:26:36,353 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=114, resume processing ppid=111 2024-11-16T06:26:36,353 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, ppid=111, state=SUCCESS, hasLock=false; CloseRegionProcedure b506bdc55c121092a6b415b5bcd45705, server=3456ee6a3164,37533,1731738278939 in 202 msec 2024-11-16T06:26:36,353 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=112 updating hbase:meta row=1ffa12be9f21008004857405922eb16f, regionState=CLOSED 2024-11-16T06:26:36,354 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=111, ppid=110, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b506bdc55c121092a6b415b5bcd45705, UNASSIGN in 210 msec 2024-11-16T06:26:36,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=113, ppid=112, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1ffa12be9f21008004857405922eb16f, server=3456ee6a3164,35939,1731738278639 because future has completed 2024-11-16T06:26:36,357 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=113, resume processing ppid=112 2024-11-16T06:26:36,357 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, ppid=112, state=SUCCESS, hasLock=false; CloseRegionProcedure 1ffa12be9f21008004857405922eb16f, server=3456ee6a3164,35939,1731738278639 in 208 msec 2024-11-16T06:26:36,358 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=112, resume processing ppid=110 2024-11-16T06:26:36,358 INFO 
[PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=112, ppid=110, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=1ffa12be9f21008004857405922eb16f, UNASSIGN in 214 msec 2024-11-16T06:26:36,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742129_1305 (size=84) 2024-11-16T06:26:36,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742129_1305 (size=84) 2024-11-16T06:26:36,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742129_1305 (size=84) 2024-11-16T06:26:36,377 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:26:36,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742130_1306 (size=20) 2024-11-16T06:26:36,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742130_1306 (size=20) 2024-11-16T06:26:36,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742130_1306 (size=20) 2024-11-16T06:26:36,393 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:26:36,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742131_1307 (size=21) 2024-11-16T06:26:36,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742131_1307 (size=21) 2024-11-16T06:26:36,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742131_1307 (size=21) 2024-11-16T06:26:36,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742132_1308 (size=84) 2024-11-16T06:26:36,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742132_1308 (size=84) 2024-11-16T06:26:36,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742132_1308 (size=84) 2024-11-16T06:26:36,411 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:26:36,421 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3649df5adf8edce351b9958fc12f93e3/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-11-16T06:26:36,423 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete 
{"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395466.b506bdc55c121092a6b415b5bcd45705.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-16T06:26:36,423 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1731738395466.1ffa12be9f21008004857405922eb16f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-16T06:26:36,423 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395467.3649df5adf8edce351b9958fc12f93e3.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-16T06:26:36,441 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3649df5adf8edce351b9958fc12f93e3, ASSIGN}] 2024-11-16T06:26:36,442 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3649df5adf8edce351b9958fc12f93e3, ASSIGN 2024-11-16T06:26:36,443 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3649df5adf8edce351b9958fc12f93e3, ASSIGN; state=MERGED, location=3456ee6a3164,37533,1731738278939; forceNewPlan=false, retain=false 2024-11-16T06:26:36,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-16T06:26:36,594 INFO [3456ee6a3164:39381 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-16T06:26:36,594 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=3649df5adf8edce351b9958fc12f93e3, regionState=OPENING, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:26:36,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3649df5adf8edce351b9958fc12f93e3, ASSIGN because future has completed 2024-11-16T06:26:36,597 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=116, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3649df5adf8edce351b9958fc12f93e3, server=3456ee6a3164,37533,1731738278939}] 2024-11-16T06:26:36,746 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T06:26:36,753 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395467.3649df5adf8edce351b9958fc12f93e3. 2024-11-16T06:26:36,754 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(7752): Opening region: {ENCODED => 3649df5adf8edce351b9958fc12f93e3, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395467.3649df5adf8edce351b9958fc12f93e3.', STARTKEY => '', ENDKEY => ''} 2024-11-16T06:26:36,754 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395467.3649df5adf8edce351b9958fc12f93e3. service=AccessControlService 2024-11-16T06:26:36,755 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-16T06:26:36,755 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 3649df5adf8edce351b9958fc12f93e3 2024-11-16T06:26:36,755 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395467.3649df5adf8edce351b9958fc12f93e3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:26:36,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-16T06:26:36,755 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(7794): checking encryption for 3649df5adf8edce351b9958fc12f93e3 2024-11-16T06:26:36,755 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(7797): checking classloading for 3649df5adf8edce351b9958fc12f93e3 2024-11-16T06:26:36,757 INFO [StoreOpener-3649df5adf8edce351b9958fc12f93e3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3649df5adf8edce351b9958fc12f93e3 2024-11-16T06:26:36,759 INFO [StoreOpener-3649df5adf8edce351b9958fc12f93e3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3649df5adf8edce351b9958fc12f93e3 columnFamilyName cf 2024-11-16T06:26:36,759 DEBUG [StoreOpener-3649df5adf8edce351b9958fc12f93e3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:26:36,778 DEBUG [StoreOpener-3649df5adf8edce351b9958fc12f93e3-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3649df5adf8edce351b9958fc12f93e3/cf/05463b20d3b64680ae2aa45746729492.b506bdc55c121092a6b415b5bcd45705->hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b506bdc55c121092a6b415b5bcd45705/cf/05463b20d3b64680ae2aa45746729492-top 2024-11-16T06:26:36,783 DEBUG [StoreOpener-3649df5adf8edce351b9958fc12f93e3-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3649df5adf8edce351b9958fc12f93e3/cf/2ed21bf1418c4c93aab160434fbde875.1ffa12be9f21008004857405922eb16f->hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1ffa12be9f21008004857405922eb16f/cf/2ed21bf1418c4c93aab160434fbde875-top 2024-11-16T06:26:36,784 INFO [StoreOpener-3649df5adf8edce351b9958fc12f93e3-1 {}] regionserver.HStore(327): Store=3649df5adf8edce351b9958fc12f93e3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:26:36,784 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1038): replaying wal for 3649df5adf8edce351b9958fc12f93e3 2024-11-16T06:26:36,785 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3649df5adf8edce351b9958fc12f93e3 2024-11-16T06:26:36,786 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3649df5adf8edce351b9958fc12f93e3 2024-11-16T06:26:36,787 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1048): stopping wal replay for 3649df5adf8edce351b9958fc12f93e3 2024-11-16T06:26:36,787 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1060): Cleaning up temporary data for 3649df5adf8edce351b9958fc12f93e3 2024-11-16T06:26:36,789 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1093): writing seq id for 3649df5adf8edce351b9958fc12f93e3 2024-11-16T06:26:36,790 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1114): Opened 3649df5adf8edce351b9958fc12f93e3; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68859236, jitterRate=0.02608257532119751}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:26:36,790 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3649df5adf8edce351b9958fc12f93e3 2024-11-16T06:26:36,791 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1006): Region open journal for 3649df5adf8edce351b9958fc12f93e3: Running coprocessor pre-open hook at 1731738396755Writing region info on filesystem at 1731738396755Initializing all the Stores at 1731738396757 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738396757Cleaning up temporary data from old regions at 1731738396787 (+30 ms)Running coprocessor post-open hooks at 1731738396790 (+3 ms)Region opened successfully at 1731738396791 (+1 ms) 2024-11-16T06:26:36,792 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395467.3649df5adf8edce351b9958fc12f93e3., pid=116, masterSystemTime=1731738396750 2024-11-16T06:26:36,792 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395467.3649df5adf8edce351b9958fc12f93e3.,because compaction is disabled. 2024-11-16T06:26:36,795 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395467.3649df5adf8edce351b9958fc12f93e3. 2024-11-16T06:26:36,795 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395467.3649df5adf8edce351b9958fc12f93e3. 2024-11-16T06:26:36,796 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=3649df5adf8edce351b9958fc12f93e3, regionState=OPEN, openSeqNum=9, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:26:36,799 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3649df5adf8edce351b9958fc12f93e3, server=3456ee6a3164,37533,1731738278939 because future has completed 2024-11-16T06:26:36,801 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=116, resume processing ppid=115 2024-11-16T06:26:36,801 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure 3649df5adf8edce351b9958fc12f93e3, server=3456ee6a3164,37533,1731738278939 in 202 msec 2024-11-16T06:26:36,804 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=115, resume processing ppid=110 2024-11-16T06:26:36,804 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=110, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3649df5adf8edce351b9958fc12f93e3, ASSIGN in 360 msec 2024-11-16T06:26:36,805 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[b506bdc55c121092a6b415b5bcd45705, 1ffa12be9f21008004857405922eb16f], force=true in 679 msec 2024-11-16T06:26:37,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-16T06:26:37,265 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-16T06:26:37,266 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-16T06:26:37,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731738397266 (current time:1731738397266). 2024-11-16T06:26:37,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-16T06:26:37,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-11-16T06:26:37,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-16T06:26:37,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6af6a41e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:37,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:26:37,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:26:37,268 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:26:37,268 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:26:37,268 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:26:37,268 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@256133fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:37,268 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:26:37,269 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:26:37,269 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:37,270 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40508, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:26:37,270 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e6f6d06, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:37,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:26:37,271 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:26:37,271 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:26:37,272 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39126, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:26:37,274 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 2024-11-16T06:26:37,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:26:37,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:37,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:37,274 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-16T06:26:37,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16014013, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:37,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:26:37,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:26:37,275 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:26:37,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:26:37,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:26:37,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@352cfc23, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:37,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:26:37,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:26:37,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:37,277 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40514, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:26:37,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57bf3001, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:37,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:26:37,279 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:26:37,279 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:26:37,280 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39138, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-16T06:26:37,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338., hostname=3456ee6a3164,35939,1731738278639, seqNum=2] 2024-11-16T06:26:37,282 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:26:37,283 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42884, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:26:37,284 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 2024-11-16T06:26:37,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:26:37,284 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:37,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:37,284 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:26:37,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-16T06:26:37,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-16T06:26:37,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=117, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-16T06:26:37,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 117 2024-11-16T06:26:37,286 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-16T06:26:37,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=117 2024-11-16T06:26:37,287 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-16T06:26:37,289 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-16T06:26:37,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742133_1309 (size=216) 2024-11-16T06:26:37,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742133_1309 (size=216) 2024-11-16T06:26:37,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742133_1309 (size=216) 2024-11-16T06:26:37,300 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, 
id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-16T06:26:37,300 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=117, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3649df5adf8edce351b9958fc12f93e3}] 2024-11-16T06:26:37,301 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=118, ppid=117, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3649df5adf8edce351b9958fc12f93e3 2024-11-16T06:26:37,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=117 2024-11-16T06:26:37,435 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0005_000001 (auth:SIMPLE) from 127.0.0.1:37750 2024-11-16T06:26:37,448 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_0/usercache/jenkins/appcache/application_1731738285782_0005/container_1731738285782_0005_01_000001/launch_container.sh] 2024-11-16T06:26:37,448 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_0/usercache/jenkins/appcache/application_1731738285782_0005/container_1731738285782_0005_01_000001/container_tokens] 2024-11-16T06:26:37,448 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_0/usercache/jenkins/appcache/application_1731738285782_0005/container_1731738285782_0005_01_000001/sysfs] 2024-11-16T06:26:37,452 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=118 2024-11-16T06:26:37,452 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395467.3649df5adf8edce351b9958fc12f93e3. 2024-11-16T06:26:37,452 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.HRegion(2603): Flush status journal for 3649df5adf8edce351b9958fc12f93e3: 2024-11-16T06:26:37,452 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395467.3649df5adf8edce351b9958fc12f93e3. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 
2024-11-16T06:26:37,452 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395467.3649df5adf8edce351b9958fc12f93e3.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:37,452 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:26:37,452 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3649df5adf8edce351b9958fc12f93e3/cf/05463b20d3b64680ae2aa45746729492.b506bdc55c121092a6b415b5bcd45705->hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b506bdc55c121092a6b415b5bcd45705/cf/05463b20d3b64680ae2aa45746729492-top, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3649df5adf8edce351b9958fc12f93e3/cf/2ed21bf1418c4c93aab160434fbde875.1ffa12be9f21008004857405922eb16f->hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1ffa12be9f21008004857405922eb16f/cf/2ed21bf1418c4c93aab160434fbde875-top] hfiles 2024-11-16T06:26:37,452 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3649df5adf8edce351b9958fc12f93e3/cf/05463b20d3b64680ae2aa45746729492.b506bdc55c121092a6b415b5bcd45705 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:37,453 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3649df5adf8edce351b9958fc12f93e3/cf/2ed21bf1418c4c93aab160434fbde875.1ffa12be9f21008004857405922eb16f for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:37,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742134_1310 (size=269) 2024-11-16T06:26:37,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742134_1310 (size=269) 2024-11-16T06:26:37,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742134_1310 (size=269) 2024-11-16T06:26:37,463 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395467.3649df5adf8edce351b9958fc12f93e3. 
2024-11-16T06:26:37,463 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=118 2024-11-16T06:26:37,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=118 2024-11-16T06:26:37,464 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 3649df5adf8edce351b9958fc12f93e3 2024-11-16T06:26:37,464 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=118, ppid=117, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3649df5adf8edce351b9958fc12f93e3 2024-11-16T06:26:37,466 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=117 2024-11-16T06:26:37,466 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-16T06:26:37,466 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=117, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3649df5adf8edce351b9958fc12f93e3 in 165 msec 2024-11-16T06:26:37,467 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-16T06:26:37,468 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-16T06:26:37,468 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:37,468 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:37,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742135_1311 (size=670) 2024-11-16T06:26:37,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742135_1311 (size=670) 2024-11-16T06:26:37,488 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 
2024-11-16T06:26:37,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742135_1311 (size=670) 2024-11-16T06:26:37,494 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-16T06:26:37,494 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:37,496 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-16T06:26:37,496 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 117 2024-11-16T06:26:37,497 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 211 msec 2024-11-16T06:26:37,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=117 2024-11-16T06:26:37,605 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-16T06:26:37,605 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738397605 2024-11-16T06:26:37,605 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:36767, tgtDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738397605, rawTgtDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738397605, srcFsUri=hdfs://localhost:36767, srcDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:26:37,646 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:36767, inputRoot=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:26:37,646 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2111949037_22, ugi=jenkins (auth:SIMPLE)]], 
outputRoot=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738397605, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738397605/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:37,647 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-16T06:26:37,652 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738397605/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:37,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742136_1312 (size=216) 2024-11-16T06:26:37,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742136_1312 (size=216) 2024-11-16T06:26:37,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742136_1312 (size=216) 2024-11-16T06:26:37,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742137_1313 (size=670) 2024-11-16T06:26:37,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742137_1313 (size=670) 2024-11-16T06:26:37,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742137_1313 (size=670) 2024-11-16T06:26:38,076 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:38,076 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:38,076 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:38,275 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:38,275 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-11-16T06:26:38,276 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:38,276 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-11-16T06:26:38,277 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-16T06:26:38,943 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-16T06:26:39,263 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop-8710321053040298694.jar 2024-11-16T06:26:39,263 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:39,264 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:39,335 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop-3212525698579248276.jar 2024-11-16T06:26:39,335 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:39,336 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:39,336 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:39,336 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:39,336 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:39,337 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:26:39,337 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-16T06:26:39,337 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-16T06:26:39,337 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-16T06:26:39,338 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-16T06:26:39,338 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-16T06:26:39,338 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-16T06:26:39,338 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-16T06:26:39,338 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-16T06:26:39,339 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-16T06:26:39,339 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-16T06:26:39,339 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-16T06:26:39,339 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:26:39,340 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:26:39,340 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:26:39,340 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:26:39,340 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:26:39,340 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:26:39,340 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:26:39,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742138_1314 (size=131440) 2024-11-16T06:26:39,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742138_1314 (size=131440) 2024-11-16T06:26:39,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742138_1314 (size=131440) 2024-11-16T06:26:39,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742139_1315 (size=4188619) 2024-11-16T06:26:39,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742139_1315 (size=4188619) 2024-11-16T06:26:39,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742139_1315 (size=4188619) 2024-11-16T06:26:39,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742140_1316 (size=1323991) 
2024-11-16T06:26:39,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742140_1316 (size=1323991) 2024-11-16T06:26:39,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742140_1316 (size=1323991) 2024-11-16T06:26:39,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742141_1317 (size=6424738) 2024-11-16T06:26:39,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742141_1317 (size=6424738) 2024-11-16T06:26:39,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742141_1317 (size=6424738) 2024-11-16T06:26:39,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742142_1318 (size=903732) 2024-11-16T06:26:39,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742142_1318 (size=903732) 2024-11-16T06:26:39,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742142_1318 (size=903732) 2024-11-16T06:26:39,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742143_1319 (size=8360083) 2024-11-16T06:26:39,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742143_1319 (size=8360083) 2024-11-16T06:26:39,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742143_1319 (size=8360083) 2024-11-16T06:26:39,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742144_1320 (size=1877034) 2024-11-16T06:26:39,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742144_1320 (size=1877034) 2024-11-16T06:26:39,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742144_1320 (size=1877034) 2024-11-16T06:26:39,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742145_1321 (size=77835) 2024-11-16T06:26:39,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742145_1321 (size=77835) 2024-11-16T06:26:39,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742145_1321 (size=77835) 2024-11-16T06:26:39,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742146_1322 (size=30949) 2024-11-16T06:26:39,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742146_1322 (size=30949) 2024-11-16T06:26:39,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742146_1322 
(size=30949) 2024-11-16T06:26:39,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742147_1323 (size=1597327) 2024-11-16T06:26:39,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742147_1323 (size=1597327) 2024-11-16T06:26:39,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742147_1323 (size=1597327) 2024-11-16T06:26:39,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742148_1324 (size=4695811) 2024-11-16T06:26:39,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742148_1324 (size=4695811) 2024-11-16T06:26:39,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742148_1324 (size=4695811) 2024-11-16T06:26:39,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742149_1325 (size=232957) 2024-11-16T06:26:39,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742149_1325 (size=232957) 2024-11-16T06:26:39,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742149_1325 (size=232957) 2024-11-16T06:26:39,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742150_1326 (size=127628) 2024-11-16T06:26:39,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742150_1326 (size=127628) 2024-11-16T06:26:39,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742150_1326 (size=127628) 2024-11-16T06:26:39,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742151_1327 (size=20406) 2024-11-16T06:26:39,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742151_1327 (size=20406) 2024-11-16T06:26:39,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742151_1327 (size=20406) 2024-11-16T06:26:39,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742152_1328 (size=5175431) 2024-11-16T06:26:39,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742152_1328 (size=5175431) 2024-11-16T06:26:39,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742152_1328 (size=5175431) 2024-11-16T06:26:39,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742153_1329 (size=440656) 2024-11-16T06:26:39,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to 
blk_1073742153_1329 (size=440656) 2024-11-16T06:26:39,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742153_1329 (size=440656) 2024-11-16T06:26:39,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742154_1330 (size=217634) 2024-11-16T06:26:39,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742154_1330 (size=217634) 2024-11-16T06:26:39,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742154_1330 (size=217634) 2024-11-16T06:26:39,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742155_1331 (size=1832290) 2024-11-16T06:26:39,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742155_1331 (size=1832290) 2024-11-16T06:26:39,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742155_1331 (size=1832290) 2024-11-16T06:26:39,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742156_1332 (size=322274) 2024-11-16T06:26:39,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742156_1332 (size=322274) 2024-11-16T06:26:39,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742156_1332 (size=322274) 2024-11-16T06:26:39,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742157_1333 (size=503880) 2024-11-16T06:26:39,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742157_1333 (size=503880) 2024-11-16T06:26:39,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742157_1333 (size=503880) 2024-11-16T06:26:39,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742158_1334 (size=29229) 2024-11-16T06:26:39,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742158_1334 (size=29229) 2024-11-16T06:26:39,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742158_1334 (size=29229) 2024-11-16T06:26:39,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742159_1335 (size=24096) 2024-11-16T06:26:39,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742159_1335 (size=24096) 2024-11-16T06:26:39,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742159_1335 (size=24096) 2024-11-16T06:26:39,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added 
to blk_1073742160_1336 (size=111872) 2024-11-16T06:26:39,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742160_1336 (size=111872) 2024-11-16T06:26:39,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742160_1336 (size=111872) 2024-11-16T06:26:39,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742161_1337 (size=45609) 2024-11-16T06:26:39,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742161_1337 (size=45609) 2024-11-16T06:26:39,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742161_1337 (size=45609) 2024-11-16T06:26:39,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742162_1338 (size=136454) 2024-11-16T06:26:39,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742162_1338 (size=136454) 2024-11-16T06:26:39,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742162_1338 (size=136454) 2024-11-16T06:26:39,906 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-16T06:26:39,909 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-11-16T06:26:39,910 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=9.7 K 2024-11-16T06:26:39,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742163_1339 (size=378) 2024-11-16T06:26:39,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742163_1339 (size=378) 2024-11-16T06:26:39,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742163_1339 (size=378) 2024-11-16T06:26:39,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742164_1340 (size=15) 2024-11-16T06:26:39,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742164_1340 (size=15) 2024-11-16T06:26:39,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742164_1340 (size=15) 2024-11-16T06:26:39,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742165_1341 (size=303785) 2024-11-16T06:26:39,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742165_1341 (size=303785) 2024-11-16T06:26:39,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742165_1341 (size=303785) 2024-11-16T06:26:39,957 WARN [SchedulerEventDispatcher:Event Processor 
{}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-16T06:26:39,958 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-16T06:26:40,276 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0006_000001 (auth:SIMPLE) from 127.0.0.1:40228 2024-11-16T06:26:40,574 WARN [regionserver/3456ee6a3164:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 4, running: 1 2024-11-16T06:26:45,762 DEBUG [master/3456ee6a3164:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 4f48e9ca11073ed9d5885225fa800cb0 changed from -1.0 to 0.0, refreshing cache 2024-11-16T06:26:45,762 DEBUG [master/3456ee6a3164:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 2bf927a7e914904d7d25f15851efde52 changed from -1.0 to 0.0, refreshing cache 2024-11-16T06:26:45,825 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0006_000001 (auth:SIMPLE) from 127.0.0.1:55022 2024-11-16T06:26:46,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742166_1342 (size=349435) 2024-11-16T06:26:46,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742166_1342 (size=349435) 2024-11-16T06:26:46,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742166_1342 (size=349435) 2024-11-16T06:26:48,043 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0006_000001 (auth:SIMPLE) from 127.0.0.1:40238 2024-11-16T06:26:50,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742167_1343 (size=4945) 2024-11-16T06:26:50,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742167_1343 (size=4945) 2024-11-16T06:26:50,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742167_1343 (size=4945) 2024-11-16T06:26:50,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742168_1344 (size=4945) 2024-11-16T06:26:50,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742168_1344 (size=4945) 2024-11-16T06:26:50,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742168_1344 (size=4945) 2024-11-16T06:26:51,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742169_1345 (size=17474) 2024-11-16T06:26:51,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to 
blk_1073742169_1345 (size=17474) 2024-11-16T06:26:51,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742169_1345 (size=17474) 2024-11-16T06:26:51,071 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_2/usercache/jenkins/appcache/application_1731738285782_0006/container_1731738285782_0006_01_000002/launch_container.sh] 2024-11-16T06:26:51,072 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_2/usercache/jenkins/appcache/application_1731738285782_0006/container_1731738285782_0006_01_000002/container_tokens] 2024-11-16T06:26:51,072 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_2/usercache/jenkins/appcache/application_1731738285782_0006/container_1731738285782_0006_01_000002/sysfs] 2024-11-16T06:26:51,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742170_1346 (size=482) 2024-11-16T06:26:51,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742170_1346 (size=482) 2024-11-16T06:26:51,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742170_1346 (size=482) 2024-11-16T06:26:51,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742171_1347 (size=17474) 2024-11-16T06:26:51,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742171_1347 (size=17474) 2024-11-16T06:26:51,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742171_1347 (size=17474) 2024-11-16T06:26:51,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742172_1348 (size=349435) 2024-11-16T06:26:51,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742172_1348 (size=349435) 2024-11-16T06:26:51,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742172_1348 (size=349435) 2024-11-16T06:26:51,544 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0006_000001 (auth:SIMPLE) from 127.0.0.1:52772 2024-11-16T06:26:53,308 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-16T06:26:53,309 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
2024-11-16T06:26:53,315 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:53,315 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-16T06:26:53,316 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-16T06:26:53,316 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2111949037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:53,316 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-16T06:26:53,316 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-16T06:26:53,316 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2111949037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738397605/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738397605/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:53,317 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738397605/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-16T06:26:53,317 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738397605/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-16T06:26:53,322 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:53,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:53,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-16T06:26:53,325 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738413325"}]},"ts":"1731738413325"} 2024-11-16T06:26:53,326 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated 
tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-11-16T06:26:53,326 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-11-16T06:26:53,327 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-11-16T06:26:53,328 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3649df5adf8edce351b9958fc12f93e3, UNASSIGN}] 2024-11-16T06:26:53,329 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=120, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3649df5adf8edce351b9958fc12f93e3, UNASSIGN 2024-11-16T06:26:53,330 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=121 updating hbase:meta row=3649df5adf8edce351b9958fc12f93e3, regionState=CLOSING, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:26:53,332 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=121, ppid=120, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3649df5adf8edce351b9958fc12f93e3, UNASSIGN because future has completed 2024-11-16T06:26:53,333 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-16T06:26:53,333 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=122, ppid=121, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3649df5adf8edce351b9958fc12f93e3, server=3456ee6a3164,37533,1731738278939}] 2024-11-16T06:26:53,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-16T06:26:53,485 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] handler.UnassignRegionHandler(122): Close 3649df5adf8edce351b9958fc12f93e3 2024-11-16T06:26:53,486 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-16T06:26:53,486 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1722): Closing 3649df5adf8edce351b9958fc12f93e3, disabling compactions & flushes 2024-11-16T06:26:53,486 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395467.3649df5adf8edce351b9958fc12f93e3. 2024-11-16T06:26:53,486 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395467.3649df5adf8edce351b9958fc12f93e3. 
2024-11-16T06:26:53,486 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395467.3649df5adf8edce351b9958fc12f93e3. after waiting 0 ms 2024-11-16T06:26:53,486 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395467.3649df5adf8edce351b9958fc12f93e3. 2024-11-16T06:26:53,493 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3649df5adf8edce351b9958fc12f93e3/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-11-16T06:26:53,493 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:26:53,494 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395467.3649df5adf8edce351b9958fc12f93e3. 2024-11-16T06:26:53,494 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1676): Region close journal for 3649df5adf8edce351b9958fc12f93e3: Waiting for close lock at 1731738413486Running coprocessor pre-close hooks at 1731738413486Disabling compacts and flushes for region at 1731738413486Disabling writes for close at 1731738413486Writing region close event to WAL at 1731738413488 (+2 ms)Running coprocessor post-close hooks at 1731738413493 (+5 ms)Closed at 1731738413494 (+1 ms) 2024-11-16T06:26:53,496 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] handler.UnassignRegionHandler(157): Closed 3649df5adf8edce351b9958fc12f93e3 2024-11-16T06:26:53,497 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=121 updating hbase:meta row=3649df5adf8edce351b9958fc12f93e3, regionState=CLOSED 2024-11-16T06:26:53,499 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=122, ppid=121, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3649df5adf8edce351b9958fc12f93e3, server=3456ee6a3164,37533,1731738278939 because future has completed 2024-11-16T06:26:53,502 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=122, resume processing ppid=121 2024-11-16T06:26:53,502 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, ppid=121, state=SUCCESS, hasLock=false; CloseRegionProcedure 3649df5adf8edce351b9958fc12f93e3, server=3456ee6a3164,37533,1731738278939 in 167 msec 2024-11-16T06:26:53,504 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=121, resume processing ppid=120 2024-11-16T06:26:53,504 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=120, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3649df5adf8edce351b9958fc12f93e3, UNASSIGN in 174 msec 2024-11-16T06:26:53,507 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished 
subprocedure pid=120, resume processing ppid=119 2024-11-16T06:26:53,507 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 178 msec 2024-11-16T06:26:53,508 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738413508"}]},"ts":"1731738413508"} 2024-11-16T06:26:53,510 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-11-16T06:26:53,510 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-11-16T06:26:53,511 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 188 msec 2024-11-16T06:26:53,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-16T06:26:53,646 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-16T06:26:53,647 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:53,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=123, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:53,650 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=123, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:53,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:53,652 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=123, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:53,654 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:53,655 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3649df5adf8edce351b9958fc12f93e3 2024-11-16T06:26:53,655 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b506bdc55c121092a6b415b5bcd45705 2024-11-16T06:26:53,655 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1ffa12be9f21008004857405922eb16f 2024-11-16T06:26:53,657 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b506bdc55c121092a6b415b5bcd45705/cf, FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b506bdc55c121092a6b415b5bcd45705/recovered.edits] 2024-11-16T06:26:53,657 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3649df5adf8edce351b9958fc12f93e3/cf, FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3649df5adf8edce351b9958fc12f93e3/recovered.edits] 2024-11-16T06:26:53,657 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1ffa12be9f21008004857405922eb16f/cf, FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1ffa12be9f21008004857405922eb16f/recovered.edits] 2024-11-16T06:26:53,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:53,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:53,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:53,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:53,659 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-16T06:26:53,659 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-16T06:26:53,659 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-16T06:26:53,659 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-16T06:26:53,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:53,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:53,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:53,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:53,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:53,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:53,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:53,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:53,663 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:53,663 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:53,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=123 2024-11-16T06:26:53,663 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b506bdc55c121092a6b415b5bcd45705/cf/05463b20d3b64680ae2aa45746729492 to 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b506bdc55c121092a6b415b5bcd45705/cf/05463b20d3b64680ae2aa45746729492 2024-11-16T06:26:53,663 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:53,663 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:53,664 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3649df5adf8edce351b9958fc12f93e3/cf/05463b20d3b64680ae2aa45746729492.b506bdc55c121092a6b415b5bcd45705 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3649df5adf8edce351b9958fc12f93e3/cf/05463b20d3b64680ae2aa45746729492.b506bdc55c121092a6b415b5bcd45705 2024-11-16T06:26:53,664 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1ffa12be9f21008004857405922eb16f/cf/2ed21bf1418c4c93aab160434fbde875 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1ffa12be9f21008004857405922eb16f/cf/2ed21bf1418c4c93aab160434fbde875 2024-11-16T06:26:53,665 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3649df5adf8edce351b9958fc12f93e3/cf/2ed21bf1418c4c93aab160434fbde875.1ffa12be9f21008004857405922eb16f to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3649df5adf8edce351b9958fc12f93e3/cf/2ed21bf1418c4c93aab160434fbde875.1ffa12be9f21008004857405922eb16f 2024-11-16T06:26:53,666 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b506bdc55c121092a6b415b5bcd45705/recovered.edits/8.seqid to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b506bdc55c121092a6b415b5bcd45705/recovered.edits/8.seqid 2024-11-16T06:26:53,667 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b506bdc55c121092a6b415b5bcd45705 2024-11-16T06:26:53,667 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1ffa12be9f21008004857405922eb16f/recovered.edits/8.seqid to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1ffa12be9f21008004857405922eb16f/recovered.edits/8.seqid 2024-11-16T06:26:53,668 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1ffa12be9f21008004857405922eb16f 2024-11-16T06:26:53,668 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3649df5adf8edce351b9958fc12f93e3/recovered.edits/12.seqid to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3649df5adf8edce351b9958fc12f93e3/recovered.edits/12.seqid 2024-11-16T06:26:53,673 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3649df5adf8edce351b9958fc12f93e3 2024-11-16T06:26:53,678 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-11-16T06:26:53,682 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=123, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:53,686 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-11-16T06:26:53,689 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-11-16T06:26:53,691 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=123, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:53,691 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-11-16T06:26:53,691 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395467.3649df5adf8edce351b9958fc12f93e3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731738413691"}]},"ts":"9223372036854775807"} 2024-11-16T06:26:53,693 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-11-16T06:26:53,693 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 3649df5adf8edce351b9958fc12f93e3, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395467.3649df5adf8edce351b9958fc12f93e3.', STARTKEY => '', ENDKEY => ''}] 2024-11-16T06:26:53,693 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 
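The HFileArchiver records above show store files and recovered.edits being moved under the archive/ directory tree before each region directory is removed, rather than being deleted outright. Below is a minimal sketch of that move-then-delete pattern using the Hadoop FileSystem API; the paths and configuration are placeholders, and this illustrates the pattern only, not HBase's actual HFileArchiver implementation.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveStoreFileSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();          // placeholder; a real run would point at the test cluster
    FileSystem fs = FileSystem.get(conf);

    // Hypothetical source and destination, mirroring the data/ -> archive/ layout seen in the log.
    Path storeFile = new Path("/hbase/data/default/someTable/someRegion/cf/someHFile");
    Path archived  = new Path("/hbase/archive/data/default/someTable/someRegion/cf/someHFile");

    fs.mkdirs(archived.getParent());                   // make sure the archive directory exists
    if (!fs.rename(storeFile, archived)) {             // move the store file instead of deleting it
      throw new IOException("Failed to archive " + storeFile);
    }
    fs.delete(storeFile.getParent(), false);           // the emptied cf directory can then be removed
  }
}
```

Archiving instead of deleting is what lets snapshots keep referencing store files after the owning table is dropped.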
2024-11-16T06:26:53,693 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731738413693"}]},"ts":"9223372036854775807"} 2024-11-16T06:26:53,695 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-11-16T06:26:53,696 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=123, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:53,698 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 49 msec 2024-11-16T06:26:53,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=123 2024-11-16T06:26:53,776 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:53,776 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-16T06:26:53,777 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:53,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=124, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:53,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=124 2024-11-16T06:26:53,782 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738413782"}]},"ts":"1731738413782"} 2024-11-16T06:26:53,785 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-11-16T06:26:53,785 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-11-16T06:26:53,786 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-11-16T06:26:53,788 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=4f48e9ca11073ed9d5885225fa800cb0, UNASSIGN}, {pid=127, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=2bf927a7e914904d7d25f15851efde52, UNASSIGN}] 2024-11-16T06:26:53,789 INFO [PEWorker-1 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=4f48e9ca11073ed9d5885225fa800cb0, UNASSIGN 2024-11-16T06:26:53,789 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=127, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=2bf927a7e914904d7d25f15851efde52, UNASSIGN 2024-11-16T06:26:53,789 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=126 updating hbase:meta row=4f48e9ca11073ed9d5885225fa800cb0, regionState=CLOSING, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:26:53,789 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=2bf927a7e914904d7d25f15851efde52, regionState=CLOSING, regionLocation=3456ee6a3164,40155,1731738278871 2024-11-16T06:26:53,791 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=4f48e9ca11073ed9d5885225fa800cb0, UNASSIGN because future has completed 2024-11-16T06:26:53,791 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-16T06:26:53,791 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=128, ppid=126, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4f48e9ca11073ed9d5885225fa800cb0, server=3456ee6a3164,37533,1731738278939}] 2024-11-16T06:26:53,792 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=127, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=2bf927a7e914904d7d25f15851efde52, UNASSIGN because future has completed 2024-11-16T06:26:53,792 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-16T06:26:53,793 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2bf927a7e914904d7d25f15851efde52, server=3456ee6a3164,40155,1731738278871}] 2024-11-16T06:26:53,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=124 2024-11-16T06:26:53,944 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] handler.UnassignRegionHandler(122): Close 4f48e9ca11073ed9d5885225fa800cb0 2024-11-16T06:26:53,944 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-16T06:26:53,944 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1722): Closing 4f48e9ca11073ed9d5885225fa800cb0, disabling compactions & flushes 2024-11-16T06:26:53,944 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1755): 
Closing region testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0. 2024-11-16T06:26:53,944 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0. 2024-11-16T06:26:53,944 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0. after waiting 0 ms 2024-11-16T06:26:53,944 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0. 2024-11-16T06:26:53,946 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(122): Close 2bf927a7e914904d7d25f15851efde52 2024-11-16T06:26:53,946 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-16T06:26:53,947 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1722): Closing 2bf927a7e914904d7d25f15851efde52, disabling compactions & flushes 2024-11-16T06:26:53,947 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52. 2024-11-16T06:26:53,947 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52. 2024-11-16T06:26:53,947 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52. after waiting 0 ms 2024-11-16T06:26:53,947 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52. 2024-11-16T06:26:53,949 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/4f48e9ca11073ed9d5885225fa800cb0/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T06:26:53,950 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:26:53,950 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0. 
2024-11-16T06:26:53,950 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1676): Region close journal for 4f48e9ca11073ed9d5885225fa800cb0: Waiting for close lock at 1731738413944Running coprocessor pre-close hooks at 1731738413944Disabling compacts and flushes for region at 1731738413944Disabling writes for close at 1731738413944Writing region close event to WAL at 1731738413945 (+1 ms)Running coprocessor post-close hooks at 1731738413950 (+5 ms)Closed at 1731738413950 2024-11-16T06:26:53,953 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] handler.UnassignRegionHandler(157): Closed 4f48e9ca11073ed9d5885225fa800cb0 2024-11-16T06:26:53,953 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=126 updating hbase:meta row=4f48e9ca11073ed9d5885225fa800cb0, regionState=CLOSED 2024-11-16T06:26:53,955 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=128, ppid=126, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4f48e9ca11073ed9d5885225fa800cb0, server=3456ee6a3164,37533,1731738278939 because future has completed 2024-11-16T06:26:53,969 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/2bf927a7e914904d7d25f15851efde52/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T06:26:53,970 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:26:53,970 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52. 
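The DISABLE and DELETE operations driving procedures pid=124 through pid=130 in these records correspond to ordinary client-side Admin calls. A minimal sketch of that client sequence, assuming an hbase-site.xml for the cluster on the classpath, is shown below; it is an illustration, not the test code that produced this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // assumes hbase-site.xml is on the classpath
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);                    // master runs DisableTableProcedure; regions are unassigned
        }
        admin.deleteTable(table);                       // master runs DeleteTableProcedure; region files are archived
      }
    }
  }
}
```

deleteTable fails while the table is still enabled, which is why the disable step comes first and why the log shows the DisableTableProcedure completing before the DeleteTableProcedure is stored.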
2024-11-16T06:26:53,970 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1676): Region close journal for 2bf927a7e914904d7d25f15851efde52: Waiting for close lock at 1731738413946Running coprocessor pre-close hooks at 1731738413946Disabling compacts and flushes for region at 1731738413947 (+1 ms)Disabling writes for close at 1731738413947Writing region close event to WAL at 1731738413948 (+1 ms)Running coprocessor post-close hooks at 1731738413970 (+22 ms)Closed at 1731738413970 2024-11-16T06:26:53,972 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=128, resume processing ppid=126 2024-11-16T06:26:53,972 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(157): Closed 2bf927a7e914904d7d25f15851efde52 2024-11-16T06:26:53,973 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, ppid=126, state=SUCCESS, hasLock=false; CloseRegionProcedure 4f48e9ca11073ed9d5885225fa800cb0, server=3456ee6a3164,37533,1731738278939 in 178 msec 2024-11-16T06:26:53,974 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=4f48e9ca11073ed9d5885225fa800cb0, UNASSIGN in 184 msec 2024-11-16T06:26:53,974 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=2bf927a7e914904d7d25f15851efde52, regionState=CLOSED 2024-11-16T06:26:53,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2bf927a7e914904d7d25f15851efde52, server=3456ee6a3164,40155,1731738278871 because future has completed 2024-11-16T06:26:53,978 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=127 2024-11-16T06:26:53,978 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=127, state=SUCCESS, hasLock=false; CloseRegionProcedure 2bf927a7e914904d7d25f15851efde52, server=3456ee6a3164,40155,1731738278871 in 183 msec 2024-11-16T06:26:53,980 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=127, resume processing ppid=125 2024-11-16T06:26:53,980 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=125, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=2bf927a7e914904d7d25f15851efde52, UNASSIGN in 190 msec 2024-11-16T06:26:53,982 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=125, resume processing ppid=124 2024-11-16T06:26:53,982 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=125, ppid=124, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 195 msec 2024-11-16T06:26:53,983 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738413983"}]},"ts":"1731738413983"} 2024-11-16T06:26:53,985 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-11-16T06:26:53,985 INFO [PEWorker-5 
{}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-11-16T06:26:53,988 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 209 msec 2024-11-16T06:26:54,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=124 2024-11-16T06:26:54,095 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-16T06:26:54,096 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:54,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=130, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:54,098 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=130, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:54,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:54,099 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=130, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:54,102 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:54,104 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/4f48e9ca11073ed9d5885225fa800cb0 2024-11-16T06:26:54,104 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/2bf927a7e914904d7d25f15851efde52 2024-11-16T06:26:54,107 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/4f48e9ca11073ed9d5885225fa800cb0/cf, FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/4f48e9ca11073ed9d5885225fa800cb0/recovered.edits] 2024-11-16T06:26:54,107 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/2bf927a7e914904d7d25f15851efde52/cf, FileablePath, 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/2bf927a7e914904d7d25f15851efde52/recovered.edits] 2024-11-16T06:26:54,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:54,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:54,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:54,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:54,108 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-16T06:26:54,108 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-16T06:26:54,108 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-16T06:26:54,108 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-16T06:26:54,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:54,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:54,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:54,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:54,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:54,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:54,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:54,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:54,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=130 2024-11-16T06:26:54,112 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/2bf927a7e914904d7d25f15851efde52/cf/2d959ee09eed454daf5378ad93a478d1 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/2bf927a7e914904d7d25f15851efde52/cf/2d959ee09eed454daf5378ad93a478d1 2024-11-16T06:26:54,112 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/4f48e9ca11073ed9d5885225fa800cb0/cf/c701f41d6b16469c92f0e092fd089bb3 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/4f48e9ca11073ed9d5885225fa800cb0/cf/c701f41d6b16469c92f0e092fd089bb3 2024-11-16T06:26:54,115 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/2bf927a7e914904d7d25f15851efde52/recovered.edits/9.seqid to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/2bf927a7e914904d7d25f15851efde52/recovered.edits/9.seqid 2024-11-16T06:26:54,115 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/4f48e9ca11073ed9d5885225fa800cb0/recovered.edits/9.seqid to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/4f48e9ca11073ed9d5885225fa800cb0/recovered.edits/9.seqid 2024-11-16T06:26:54,115 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/2bf927a7e914904d7d25f15851efde52 2024-11-16T06:26:54,115 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithMergeRegion/4f48e9ca11073ed9d5885225fa800cb0 2024-11-16T06:26:54,116 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-11-16T06:26:54,116 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-11-16T06:26:54,117 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf] 2024-11-16T06:26:54,120 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202411168f9435fbddf3459c8bdd74f08d13fe57_2bf927a7e914904d7d25f15851efde52 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202411168f9435fbddf3459c8bdd74f08d13fe57_2bf927a7e914904d7d25f15851efde52 2024-11-16T06:26:54,124 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241116cb85619b713f45ddb2f0bee7b9a526ef_4f48e9ca11073ed9d5885225fa800cb0 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241116cb85619b713f45ddb2f0bee7b9a526ef_4f48e9ca11073ed9d5885225fa800cb0 2024-11-16T06:26:54,124 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-11-16T06:26:54,127 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=130, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:54,130 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-11-16T06:26:54,133 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-11-16T06:26:54,135 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=130, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:54,135 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 
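A little further on, the same client also deletes the snapshots created during the test. A short sketch of the corresponding Admin.deleteSnapshot calls follows; the snapshot names match the ones logged below, while the connection setup is a placeholder.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      // Snapshot names as they appear in this log; deleteSnapshot removes the snapshot metadata only.
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1");
    }
  }
}
```

Any archived HFiles still referenced by other snapshots remain in place until the cleaner chores determine they are unreferenced.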
2024-11-16T06:26:54,135 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731738414135"}]},"ts":"9223372036854775807"} 2024-11-16T06:26:54,135 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731738414135"}]},"ts":"9223372036854775807"} 2024-11-16T06:26:54,138 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-16T06:26:54,138 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 4f48e9ca11073ed9d5885225fa800cb0, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1731738394063.4f48e9ca11073ed9d5885225fa800cb0.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 2bf927a7e914904d7d25f15851efde52, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1731738394063.2bf927a7e914904d7d25f15851efde52.', STARTKEY => '1', ENDKEY => ''}] 2024-11-16T06:26:54,138 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 2024-11-16T06:26:54,138 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731738414138"}]},"ts":"9223372036854775807"} 2024-11-16T06:26:54,140 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-11-16T06:26:54,141 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=130, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:54,143 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 45 msec 2024-11-16T06:26:54,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=130 2024-11-16T06:26:54,215 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:54,215 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-16T06:26:54,223 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-11-16T06:26:54,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:54,226 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-11-16T06:26:54,228 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:54,229 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-11-16T06:26:54,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:54,251 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=801 (was 795) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/3456ee6a3164:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41961 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-4855 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/3456ee6a3164:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1199032633_1 at /127.0.0.1:57828 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2111949037_22 at /127.0.0.1:57850 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 125323) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (692179358) connection to localhost/127.0.0.1:41961 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2111949037_22 at /127.0.0.1:59060 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1199032633_1 at /127.0.0.1:59050 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2111949037_22 at /127.0.0.1:38798 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/3456ee6a3164:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=805 (was 815), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=527 (was 660), ProcessCount=16 (was 19), AvailableMemoryMB=826 (was 453) - AvailableMemoryMB LEAK? - 2024-11-16T06:26:54,252 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=801 is superior to 500 2024-11-16T06:26:54,270 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=801, OpenFileDescriptor=805, MaxFileDescriptor=1048576, SystemLoadAverage=527, ProcessCount=16, AvailableMemoryMB=824 2024-11-16T06:26:54,271 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=801 is superior to 500 2024-11-16T06:26:54,272 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T06:26:54,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=131, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-16T06:26:54,274 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T06:26:54,274 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 131 2024-11-16T06:26:54,275 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T06:26:54,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-16T06:26:54,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742173_1349 (size=443) 2024-11-16T06:26:54,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742173_1349 (size=443) 2024-11-16T06:26:54,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742173_1349 (size=443) 2024-11-16T06:26:54,286 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 0d24763c14f02ee7f64a21bcb825ac8e, NAME => 
'testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:26:54,286 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 151f6e94f7574f3a28f05b933028ac52, NAME => 'testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:26:54,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742175_1351 (size=68) 2024-11-16T06:26:54,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742174_1350 (size=68) 2024-11-16T06:26:54,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742175_1351 (size=68) 2024-11-16T06:26:54,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742174_1350 (size=68) 2024-11-16T06:26:54,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742175_1351 (size=68) 2024-11-16T06:26:54,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742174_1350 (size=68) 2024-11-16T06:26:54,301 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:26:54,301 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:26:54,301 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing 151f6e94f7574f3a28f05b933028ac52, disabling compactions & 
flushes 2024-11-16T06:26:54,301 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 0d24763c14f02ee7f64a21bcb825ac8e, disabling compactions & flushes 2024-11-16T06:26:54,301 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52. 2024-11-16T06:26:54,301 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e. 2024-11-16T06:26:54,301 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52. 2024-11-16T06:26:54,301 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e. 2024-11-16T06:26:54,301 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52. after waiting 0 ms 2024-11-16T06:26:54,301 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e. after waiting 0 ms 2024-11-16T06:26:54,301 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52. 2024-11-16T06:26:54,301 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e. 2024-11-16T06:26:54,301 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52. 2024-11-16T06:26:54,301 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e. 
2024-11-16T06:26:54,301 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 0d24763c14f02ee7f64a21bcb825ac8e: Waiting for close lock at 1731738414301Disabling compacts and flushes for region at 1731738414301Disabling writes for close at 1731738414301Writing region close event to WAL at 1731738414301Closed at 1731738414301 2024-11-16T06:26:54,301 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for 151f6e94f7574f3a28f05b933028ac52: Waiting for close lock at 1731738414301Disabling compacts and flushes for region at 1731738414301Disabling writes for close at 1731738414301Writing region close event to WAL at 1731738414301Closed at 1731738414301 2024-11-16T06:26:54,302 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T06:26:54,302 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1731738414302"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731738414302"}]},"ts":"1731738414302"} 2024-11-16T06:26:54,302 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1731738414302"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731738414302"}]},"ts":"1731738414302"} 2024-11-16T06:26:54,305 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-11-16T06:26:54,308 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T06:26:54,308 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738414308"}]},"ts":"1731738414308"} 2024-11-16T06:26:54,310 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-16T06:26:54,311 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {3456ee6a3164=0} racks are {/default-rack=0} 2024-11-16T06:26:54,312 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-16T06:26:54,312 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-16T06:26:54,312 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-16T06:26:54,312 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-16T06:26:54,312 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-16T06:26:54,312 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-16T06:26:54,312 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-16T06:26:54,312 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-16T06:26:54,312 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-16T06:26:54,312 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-16T06:26:54,312 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=151f6e94f7574f3a28f05b933028ac52, ASSIGN}, {pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0d24763c14f02ee7f64a21bcb825ac8e, ASSIGN}] 2024-11-16T06:26:54,318 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0d24763c14f02ee7f64a21bcb825ac8e, ASSIGN 2024-11-16T06:26:54,318 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=151f6e94f7574f3a28f05b933028ac52, ASSIGN 2024-11-16T06:26:54,320 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=151f6e94f7574f3a28f05b933028ac52, ASSIGN; state=OFFLINE, location=3456ee6a3164,37533,1731738278939; forceNewPlan=false, retain=false 2024-11-16T06:26:54,320 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=133, ppid=131, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0d24763c14f02ee7f64a21bcb825ac8e, ASSIGN; state=OFFLINE, location=3456ee6a3164,35939,1731738278639; forceNewPlan=false, retain=false 2024-11-16T06:26:54,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-16T06:26:54,471 INFO [3456ee6a3164:39381 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-16T06:26:54,471 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=132 updating hbase:meta row=151f6e94f7574f3a28f05b933028ac52, regionState=OPENING, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:26:54,471 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=0d24763c14f02ee7f64a21bcb825ac8e, regionState=OPENING, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:26:54,474 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=151f6e94f7574f3a28f05b933028ac52, ASSIGN because future has completed 2024-11-16T06:26:54,475 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=134, ppid=132, state=RUNNABLE, hasLock=false; OpenRegionProcedure 151f6e94f7574f3a28f05b933028ac52, server=3456ee6a3164,37533,1731738278939}] 2024-11-16T06:26:54,476 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0d24763c14f02ee7f64a21bcb825ac8e, ASSIGN because future has completed 2024-11-16T06:26:54,476 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=135, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0d24763c14f02ee7f64a21bcb825ac8e, server=3456ee6a3164,35939,1731738278639}] 2024-11-16T06:26:54,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-16T06:26:54,632 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52. 2024-11-16T06:26:54,632 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7752): Opening region: {ENCODED => 151f6e94f7574f3a28f05b933028ac52, NAME => 'testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52.', STARTKEY => '', ENDKEY => '1'} 2024-11-16T06:26:54,633 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52. 
service=AccessControlService 2024-11-16T06:26:54,633 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-16T06:26:54,633 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 151f6e94f7574f3a28f05b933028ac52 2024-11-16T06:26:54,633 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:26:54,633 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7794): checking encryption for 151f6e94f7574f3a28f05b933028ac52 2024-11-16T06:26:54,633 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7797): checking classloading for 151f6e94f7574f3a28f05b933028ac52 2024-11-16T06:26:54,634 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e. 2024-11-16T06:26:54,634 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7752): Opening region: {ENCODED => 0d24763c14f02ee7f64a21bcb825ac8e, NAME => 'testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e.', STARTKEY => '1', ENDKEY => ''} 2024-11-16T06:26:54,634 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e. service=AccessControlService 2024-11-16T06:26:54,635 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-16T06:26:54,635 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 0d24763c14f02ee7f64a21bcb825ac8e 2024-11-16T06:26:54,635 INFO [StoreOpener-151f6e94f7574f3a28f05b933028ac52-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 151f6e94f7574f3a28f05b933028ac52 2024-11-16T06:26:54,635 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:26:54,635 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7794): checking encryption for 0d24763c14f02ee7f64a21bcb825ac8e 2024-11-16T06:26:54,635 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7797): checking classloading for 0d24763c14f02ee7f64a21bcb825ac8e 2024-11-16T06:26:54,636 INFO [StoreOpener-0d24763c14f02ee7f64a21bcb825ac8e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0d24763c14f02ee7f64a21bcb825ac8e 2024-11-16T06:26:54,636 INFO [StoreOpener-151f6e94f7574f3a28f05b933028ac52-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 151f6e94f7574f3a28f05b933028ac52 columnFamilyName cf 2024-11-16T06:26:54,637 DEBUG [StoreOpener-151f6e94f7574f3a28f05b933028ac52-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:26:54,638 INFO [StoreOpener-151f6e94f7574f3a28f05b933028ac52-1 {}] regionserver.HStore(327): Store=151f6e94f7574f3a28f05b933028ac52/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:26:54,638 INFO [StoreOpener-0d24763c14f02ee7f64a21bcb825ac8e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, 
incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0d24763c14f02ee7f64a21bcb825ac8e columnFamilyName cf 2024-11-16T06:26:54,638 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1038): replaying wal for 151f6e94f7574f3a28f05b933028ac52 2024-11-16T06:26:54,638 DEBUG [StoreOpener-0d24763c14f02ee7f64a21bcb825ac8e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:26:54,639 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/151f6e94f7574f3a28f05b933028ac52 2024-11-16T06:26:54,639 INFO [StoreOpener-0d24763c14f02ee7f64a21bcb825ac8e-1 {}] regionserver.HStore(327): Store=0d24763c14f02ee7f64a21bcb825ac8e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:26:54,639 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/151f6e94f7574f3a28f05b933028ac52 2024-11-16T06:26:54,639 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1038): replaying wal for 0d24763c14f02ee7f64a21bcb825ac8e 2024-11-16T06:26:54,639 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1048): stopping wal replay for 151f6e94f7574f3a28f05b933028ac52 2024-11-16T06:26:54,639 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1060): Cleaning up temporary data for 151f6e94f7574f3a28f05b933028ac52 2024-11-16T06:26:54,640 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/0d24763c14f02ee7f64a21bcb825ac8e 2024-11-16T06:26:54,640 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/0d24763c14f02ee7f64a21bcb825ac8e 2024-11-16T06:26:54,641 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1048): stopping wal replay for 0d24763c14f02ee7f64a21bcb825ac8e 2024-11-16T06:26:54,641 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1060): Cleaning up temporary data for 0d24763c14f02ee7f64a21bcb825ac8e 2024-11-16T06:26:54,641 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 
{event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1093): writing seq id for 151f6e94f7574f3a28f05b933028ac52 2024-11-16T06:26:54,642 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1093): writing seq id for 0d24763c14f02ee7f64a21bcb825ac8e 2024-11-16T06:26:54,643 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/151f6e94f7574f3a28f05b933028ac52/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:26:54,643 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1114): Opened 151f6e94f7574f3a28f05b933028ac52; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73980896, jitterRate=0.1024012565612793}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:26:54,643 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 151f6e94f7574f3a28f05b933028ac52 2024-11-16T06:26:54,644 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1006): Region open journal for 151f6e94f7574f3a28f05b933028ac52: Running coprocessor pre-open hook at 1731738414633Writing region info on filesystem at 1731738414633Initializing all the Stores at 1731738414634 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738414634Cleaning up temporary data from old regions at 1731738414639 (+5 ms)Running coprocessor post-open hooks at 1731738414643 (+4 ms)Region opened successfully at 1731738414644 (+1 ms) 2024-11-16T06:26:54,644 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/0d24763c14f02ee7f64a21bcb825ac8e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:26:54,645 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52., pid=134, masterSystemTime=1731738414627 2024-11-16T06:26:54,645 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1114): Opened 0d24763c14f02ee7f64a21bcb825ac8e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66604010, jitterRate=-0.007522910833358765}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:26:54,645 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 
0d24763c14f02ee7f64a21bcb825ac8e 2024-11-16T06:26:54,645 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1006): Region open journal for 0d24763c14f02ee7f64a21bcb825ac8e: Running coprocessor pre-open hook at 1731738414635Writing region info on filesystem at 1731738414635Initializing all the Stores at 1731738414636 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738414636Cleaning up temporary data from old regions at 1731738414641 (+5 ms)Running coprocessor post-open hooks at 1731738414645 (+4 ms)Region opened successfully at 1731738414645 2024-11-16T06:26:54,646 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e., pid=135, masterSystemTime=1731738414629 2024-11-16T06:26:54,647 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52. 2024-11-16T06:26:54,647 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52. 2024-11-16T06:26:54,648 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=132 updating hbase:meta row=151f6e94f7574f3a28f05b933028ac52, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:26:54,648 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e. 2024-11-16T06:26:54,648 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e. 
2024-11-16T06:26:54,648 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=0d24763c14f02ee7f64a21bcb825ac8e, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:26:54,649 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=134, ppid=132, state=RUNNABLE, hasLock=false; OpenRegionProcedure 151f6e94f7574f3a28f05b933028ac52, server=3456ee6a3164,37533,1731738278939 because future has completed 2024-11-16T06:26:54,650 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=135, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0d24763c14f02ee7f64a21bcb825ac8e, server=3456ee6a3164,35939,1731738278639 because future has completed 2024-11-16T06:26:54,651 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=134, resume processing ppid=132 2024-11-16T06:26:54,651 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=134, ppid=132, state=SUCCESS, hasLock=false; OpenRegionProcedure 151f6e94f7574f3a28f05b933028ac52, server=3456ee6a3164,37533,1731738278939 in 175 msec 2024-11-16T06:26:54,652 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=135, resume processing ppid=133 2024-11-16T06:26:54,652 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=135, ppid=133, state=SUCCESS, hasLock=false; OpenRegionProcedure 0d24763c14f02ee7f64a21bcb825ac8e, server=3456ee6a3164,35939,1731738278639 in 175 msec 2024-11-16T06:26:54,653 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, ppid=131, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=151f6e94f7574f3a28f05b933028ac52, ASSIGN in 339 msec 2024-11-16T06:26:54,654 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=133, resume processing ppid=131 2024-11-16T06:26:54,654 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=131, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0d24763c14f02ee7f64a21bcb825ac8e, ASSIGN in 340 msec 2024-11-16T06:26:54,654 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T06:26:54,654 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738414654"}]},"ts":"1731738414654"} 2024-11-16T06:26:54,656 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-16T06:26:54,656 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T06:26:54,657 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-11-16T06:26:54,659 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], 
kv [jenkins: RWXCA] 2024-11-16T06:26:54,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-16T06:26:54,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:54,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:54,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:54,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:55,164 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:55,164 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:55,164 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:55,164 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:55,167 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 892 msec 2024-11-16T06:26:55,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-16T06:26:55,416 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-16T06:26:55,416 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-16T06:26:55,422 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-11-16T06:26:55,422 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52. 
2024-11-16T06:26:55,423 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T06:26:55,426 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-16T06:26:55,431 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-16T06:26:55,436 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-16T06:26:55,439 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-16T06:26:55,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731738415439 (current time:1731738415439). 2024-11-16T06:26:55,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-16T06:26:55,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-16T06:26:55,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-16T06:26:55,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58960098, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:55,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:26:55,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:26:55,441 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:26:55,441 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:26:55,441 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:26:55,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44a1f759, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, 
fallbackAllowed=true, bind address=null 2024-11-16T06:26:55,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:26:55,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:26:55,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:55,443 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58472, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:26:55,444 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c33b992, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:55,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:26:55,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:26:55,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:26:55,446 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45138, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:26:55,447 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 
2024-11-16T06:26:55,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:26:55,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:55,448 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:26:55,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:55,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f56bdaf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:55,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:26:55,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:26:55,449 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:26:55,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:26:55,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:26:55,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@517d432c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:55,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:26:55,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:26:55,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:55,451 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58480, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:26:55,452 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16c4b94e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:55,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:26:55,453 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:26:55,453 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:26:55,454 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45140, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:26:55,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338., hostname=3456ee6a3164,35939,1731738278639, seqNum=2] 2024-11-16T06:26:55,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:26:55,456 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55250, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:26:55,457 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 
2024-11-16T06:26:55,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:26:55,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:55,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:55,458 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:26:55,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-16T06:26:55,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-16T06:26:55,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=136, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-16T06:26:55,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 136 2024-11-16T06:26:55,460 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-16T06:26:55,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-16T06:26:55,462 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-16T06:26:55,464 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-16T06:26:55,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742176_1352 (size=170) 2024-11-16T06:26:55,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742176_1352 (size=170) 2024-11-16T06:26:55,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742176_1352 (size=170) 2024-11-16T06:26:55,469 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-16T06:26:55,470 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 151f6e94f7574f3a28f05b933028ac52}, {pid=138, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0d24763c14f02ee7f64a21bcb825ac8e}] 2024-11-16T06:26:55,470 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=137, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 151f6e94f7574f3a28f05b933028ac52 2024-11-16T06:26:55,470 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0d24763c14f02ee7f64a21bcb825ac8e 2024-11-16T06:26:55,564 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-16T06:26:55,622 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=137 2024-11-16T06:26:55,622 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=138 2024-11-16T06:26:55,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52. 2024-11-16T06:26:55,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e. 2024-11-16T06:26:55,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.HRegion(2603): Flush status journal for 0d24763c14f02ee7f64a21bcb825ac8e: 2024-11-16T06:26:55,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.HRegion(2603): Flush status journal for 151f6e94f7574f3a28f05b933028ac52: 2024-11-16T06:26:55,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-16T06:26:55,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-16T06:26:55,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-16T06:26:55,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-16T06:26:55,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:26:55,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:26:55,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-16T06:26:55,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-16T06:26:55,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742177_1353 (size=71) 2024-11-16T06:26:55,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742178_1354 (size=71) 2024-11-16T06:26:55,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742177_1353 (size=71) 2024-11-16T06:26:55,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742178_1354 (size=71) 2024-11-16T06:26:55,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742178_1354 (size=71) 2024-11-16T06:26:55,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742177_1353 (size=71) 2024-11-16T06:26:55,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52. 2024-11-16T06:26:55,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e. 
2024-11-16T06:26:55,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-16T06:26:55,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-11-16T06:26:55,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=138 2024-11-16T06:26:55,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=137 2024-11-16T06:26:55,630 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 151f6e94f7574f3a28f05b933028ac52 2024-11-16T06:26:55,630 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 0d24763c14f02ee7f64a21bcb825ac8e 2024-11-16T06:26:55,630 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0d24763c14f02ee7f64a21bcb825ac8e 2024-11-16T06:26:55,630 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=137, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 151f6e94f7574f3a28f05b933028ac52 2024-11-16T06:26:55,632 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=136, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0d24763c14f02ee7f64a21bcb825ac8e in 161 msec 2024-11-16T06:26:55,633 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=137, resume processing ppid=136 2024-11-16T06:26:55,633 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, ppid=136, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 151f6e94f7574f3a28f05b933028ac52 in 161 msec 2024-11-16T06:26:55,633 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-16T06:26:55,633 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-16T06:26:55,634 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-16T06:26:55,634 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-16T06:26:55,634 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:26:55,635 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-16T06:26:55,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742179_1355 (size=63) 2024-11-16T06:26:55,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742179_1355 (size=63) 2024-11-16T06:26:55,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742179_1355 (size=63) 2024-11-16T06:26:55,641 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-16T06:26:55,641 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-11-16T06:26:55,642 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-11-16T06:26:55,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742180_1356 (size=653) 2024-11-16T06:26:55,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742180_1356 (size=653) 2024-11-16T06:26:55,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742180_1356 (size=653) 2024-11-16T06:26:55,652 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-16T06:26:55,656 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-16T06:26:55,657 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-11-16T06:26:55,658 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=136, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-16T06:26:55,658 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 136 2024-11-16T06:26:55,660 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 199 msec 2024-11-16T06:26:55,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-16T06:26:55,776 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-16T06:26:55,791 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37533 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52. with WAL disabled. Data may be lost in the event of a crash. 2024-11-16T06:26:55,795 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35939 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e. with WAL disabled. Data may be lost in the event of a crash. 2024-11-16T06:26:55,796 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-16T06:26:55,798 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-11-16T06:26:55,798 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52. 
2024-11-16T06:26:55,798 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T06:26:55,799 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-16T06:26:55,803 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-16T06:26:55,808 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-16T06:26:55,810 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-16T06:26:55,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731738415810 (current time:1731738415810). 2024-11-16T06:26:55,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-16T06:26:55,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-16T06:26:55,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-16T06:26:55,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6da6f8c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:55,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:26:55,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:26:55,811 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:26:55,811 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:26:55,811 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:26:55,812 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38722c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-11-16T06:26:55,812 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:26:55,812 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:26:55,812 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:55,812 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58494, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:26:55,813 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f66991d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:55,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:26:55,814 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:26:55,814 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:26:55,815 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45150, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:26:55,816 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 
2024-11-16T06:26:55,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:26:55,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:55,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:55,816 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:26:55,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47688bed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:55,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:26:55,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:26:55,818 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:26:55,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:26:55,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:26:55,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b3778a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:55,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:26:55,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:26:55,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:55,819 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58502, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:26:55,820 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cb68f9f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:55,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:26:55,821 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:26:55,822 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:26:55,822 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45158, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:26:55,824 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338., hostname=3456ee6a3164,35939,1731738278639, seqNum=2] 2024-11-16T06:26:55,824 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:26:55,825 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55264, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:26:55,826 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 
2024-11-16T06:26:55,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:26:55,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:55,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:55,826 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:26:55,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-16T06:26:55,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-16T06:26:55,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=139, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-16T06:26:55,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 139 2024-11-16T06:26:55,829 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-16T06:26:55,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-16T06:26:55,830 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-16T06:26:55,832 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-16T06:26:55,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742181_1357 (size=165) 2024-11-16T06:26:55,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742181_1357 (size=165) 2024-11-16T06:26:55,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742181_1357 (size=165) 2024-11-16T06:26:55,839 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-16T06:26:55,839 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=140, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 151f6e94f7574f3a28f05b933028ac52}, {pid=141, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0d24763c14f02ee7f64a21bcb825ac8e}] 2024-11-16T06:26:55,840 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0d24763c14f02ee7f64a21bcb825ac8e 2024-11-16T06:26:55,840 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=140, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 151f6e94f7574f3a28f05b933028ac52 2024-11-16T06:26:55,935 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-16T06:26:55,993 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-11-16T06:26:55,993 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=140 2024-11-16T06:26:55,993 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52. 2024-11-16T06:26:55,994 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e. 2024-11-16T06:26:55,994 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegion(2902): Flushing 151f6e94f7574f3a28f05b933028ac52 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-16T06:26:55,994 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2902): Flushing 0d24763c14f02ee7f64a21bcb825ac8e 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-16T06:26:56,013 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241116bcd9df8223c54f10b9541859caa3a641_151f6e94f7574f3a28f05b933028ac52 is 71, key is 0d5bb2296fc37c9e88ffd00268444477/cf:q/1731738415791/Put/seqid=0 2024-11-16T06:26:56,015 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411164dea602b50664b32b6eeddecbe012d2f_0d24763c14f02ee7f64a21bcb825ac8e is 71, key is 1623408a5307da55a958ef4154d51c5a/cf:q/1731738415794/Put/seqid=0 2024-11-16T06:26:56,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742182_1358 (size=5102) 2024-11-16T06:26:56,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742182_1358 (size=5102) 2024-11-16T06:26:56,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742182_1358 (size=5102) 2024-11-16T06:26:56,019 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:26:56,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to 
blk_1073742183_1359 (size=8171) 2024-11-16T06:26:56,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742183_1359 (size=8171) 2024-11-16T06:26:56,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742183_1359 (size=8171) 2024-11-16T06:26:56,021 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:26:56,024 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241116bcd9df8223c54f10b9541859caa3a641_151f6e94f7574f3a28f05b933028ac52 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241116bcd9df8223c54f10b9541859caa3a641_151f6e94f7574f3a28f05b933028ac52 2024-11-16T06:26:56,025 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411164dea602b50664b32b6eeddecbe012d2f_0d24763c14f02ee7f64a21bcb825ac8e to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b202411164dea602b50664b32b6eeddecbe012d2f_0d24763c14f02ee7f64a21bcb825ac8e 2024-11-16T06:26:56,025 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/151f6e94f7574f3a28f05b933028ac52/.tmp/cf/dbe55b2590c7463e9dbd1bbc5753b475, store: [table=testtb-testExportExpiredSnapshot family=cf region=151f6e94f7574f3a28f05b933028ac52] 2024-11-16T06:26:56,025 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/151f6e94f7574f3a28f05b933028ac52/.tmp/cf/dbe55b2590c7463e9dbd1bbc5753b475 is 209, key is 05acc9c259e19e482bc9e92f9f066795a/cf:q/1731738415791/Put/seqid=0 2024-11-16T06:26:56,025 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/0d24763c14f02ee7f64a21bcb825ac8e/.tmp/cf/90d9b9b60b5c4843a74be6eb6376a732, store: [table=testtb-testExportExpiredSnapshot family=cf region=0d24763c14f02ee7f64a21bcb825ac8e] 2024-11-16T06:26:56,026 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, 
pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/0d24763c14f02ee7f64a21bcb825ac8e/.tmp/cf/90d9b9b60b5c4843a74be6eb6376a732 is 209, key is 1b21d12e94ab9d340a9d5483925e6de07/cf:q/1731738415794/Put/seqid=0 2024-11-16T06:26:56,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742185_1361 (size=14997) 2024-11-16T06:26:56,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742185_1361 (size=14997) 2024-11-16T06:26:56,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742185_1361 (size=14997) 2024-11-16T06:26:56,034 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/0d24763c14f02ee7f64a21bcb825ac8e/.tmp/cf/90d9b9b60b5c4843a74be6eb6376a732 2024-11-16T06:26:56,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742184_1360 (size=5918) 2024-11-16T06:26:56,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742184_1360 (size=5918) 2024-11-16T06:26:56,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742184_1360 (size=5918) 2024-11-16T06:26:56,039 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/0d24763c14f02ee7f64a21bcb825ac8e/.tmp/cf/90d9b9b60b5c4843a74be6eb6376a732 as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/0d24763c14f02ee7f64a21bcb825ac8e/cf/90d9b9b60b5c4843a74be6eb6376a732 2024-11-16T06:26:56,039 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/151f6e94f7574f3a28f05b933028ac52/.tmp/cf/dbe55b2590c7463e9dbd1bbc5753b475 2024-11-16T06:26:56,043 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/0d24763c14f02ee7f64a21bcb825ac8e/cf/90d9b9b60b5c4843a74be6eb6376a732, entries=47, sequenceid=6, filesize=14.6 K 2024-11-16T06:26:56,044 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/151f6e94f7574f3a28f05b933028ac52/.tmp/cf/dbe55b2590c7463e9dbd1bbc5753b475 as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/151f6e94f7574f3a28f05b933028ac52/cf/dbe55b2590c7463e9dbd1bbc5753b475 2024-11-16T06:26:56,044 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 0d24763c14f02ee7f64a21bcb825ac8e in 50ms, sequenceid=6, compaction requested=false 2024-11-16T06:26:56,045 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-11-16T06:26:56,045 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2603): Flush status journal for 0d24763c14f02ee7f64a21bcb825ac8e: 2024-11-16T06:26:56,045 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e. for snaptb0-testExportExpiredSnapshot completed. 2024-11-16T06:26:56,045 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-16T06:26:56,045 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:26:56,045 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/0d24763c14f02ee7f64a21bcb825ac8e/cf/90d9b9b60b5c4843a74be6eb6376a732] hfiles 2024-11-16T06:26:56,045 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/0d24763c14f02ee7f64a21bcb825ac8e/cf/90d9b9b60b5c4843a74be6eb6376a732 for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-16T06:26:56,050 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/151f6e94f7574f3a28f05b933028ac52/cf/dbe55b2590c7463e9dbd1bbc5753b475, entries=3, sequenceid=6, filesize=5.8 K 2024-11-16T06:26:56,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742186_1362 (size=110) 2024-11-16T06:26:56,051 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, 
pid=140}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 151f6e94f7574f3a28f05b933028ac52 in 57ms, sequenceid=6, compaction requested=false 2024-11-16T06:26:56,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742186_1362 (size=110) 2024-11-16T06:26:56,051 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegion(2603): Flush status journal for 151f6e94f7574f3a28f05b933028ac52: 2024-11-16T06:26:56,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742186_1362 (size=110) 2024-11-16T06:26:56,051 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52. for snaptb0-testExportExpiredSnapshot completed. 2024-11-16T06:26:56,051 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-16T06:26:56,051 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:26:56,051 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/151f6e94f7574f3a28f05b933028ac52/cf/dbe55b2590c7463e9dbd1bbc5753b475] hfiles 2024-11-16T06:26:56,051 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/151f6e94f7574f3a28f05b933028ac52/cf/dbe55b2590c7463e9dbd1bbc5753b475 for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-16T06:26:56,051 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e. 
2024-11-16T06:26:56,051 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-16T06:26:56,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-11-16T06:26:56,052 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 0d24763c14f02ee7f64a21bcb825ac8e 2024-11-16T06:26:56,052 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0d24763c14f02ee7f64a21bcb825ac8e 2024-11-16T06:26:56,055 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=139, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0d24763c14f02ee7f64a21bcb825ac8e in 214 msec 2024-11-16T06:26:56,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742187_1363 (size=110) 2024-11-16T06:26:56,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742187_1363 (size=110) 2024-11-16T06:26:56,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742187_1363 (size=110) 2024-11-16T06:26:56,058 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52. 
2024-11-16T06:26:56,058 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=140 2024-11-16T06:26:56,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=140 2024-11-16T06:26:56,058 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 151f6e94f7574f3a28f05b933028ac52 2024-11-16T06:26:56,058 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=140, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 151f6e94f7574f3a28f05b933028ac52 2024-11-16T06:26:56,061 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=140, resume processing ppid=139 2024-11-16T06:26:56,061 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, ppid=139, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 151f6e94f7574f3a28f05b933028ac52 in 220 msec 2024-11-16T06:26:56,061 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-16T06:26:56,061 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-16T06:26:56,062 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-16T06:26:56,062 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-16T06:26:56,062 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:26:56,063 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b202411164dea602b50664b32b6eeddecbe012d2f_0d24763c14f02ee7f64a21bcb825ac8e, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241116bcd9df8223c54f10b9541859caa3a641_151f6e94f7574f3a28f05b933028ac52] hfiles 2024-11-16T06:26:56,063 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b202411164dea602b50664b32b6eeddecbe012d2f_0d24763c14f02ee7f64a21bcb825ac8e 2024-11-16T06:26:56,063 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241116bcd9df8223c54f10b9541859caa3a641_151f6e94f7574f3a28f05b933028ac52 2024-11-16T06:26:56,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742188_1364 (size=294) 2024-11-16T06:26:56,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742188_1364 (size=294) 2024-11-16T06:26:56,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742188_1364 (size=294) 2024-11-16T06:26:56,070 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-16T06:26:56,070 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-11-16T06:26:56,071 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-11-16T06:26:56,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742189_1365 (size=963) 2024-11-16T06:26:56,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742189_1365 (size=963) 2024-11-16T06:26:56,078 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742189_1365 (size=963) 2024-11-16T06:26:56,084 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-16T06:26:56,090 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-16T06:26:56,090 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-11-16T06:26:56,091 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-16T06:26:56,091 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 139 2024-11-16T06:26:56,093 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 265 msec 2024-11-16T06:26:56,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-16T06:26:56,145 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-16T06:26:56,147 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T06:26:56,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=142, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot 2024-11-16T06:26:56,149 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, 
hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T06:26:56,149 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 142 2024-11-16T06:26:56,149 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T06:26:56,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-16T06:26:56,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742190_1366 (size=436) 2024-11-16T06:26:56,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742190_1366 (size=436) 2024-11-16T06:26:56,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742190_1366 (size=436) 2024-11-16T06:26:56,157 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 15023cc25d03bb61082803e08a9c5ef3, NAME => 'testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:26:56,157 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 28ca42946236efd20803c06170fc6c61, NAME => 'testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:26:56,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742192_1368 (size=61) 2024-11-16T06:26:56,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742191_1367 (size=61) 2024-11-16T06:26:56,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36185 is added to blk_1073742192_1368 (size=61) 2024-11-16T06:26:56,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742191_1367 (size=61) 2024-11-16T06:26:56,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742192_1368 (size=61) 2024-11-16T06:26:56,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742191_1367 (size=61) 2024-11-16T06:26:56,164 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:26:56,164 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:26:56,164 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing 28ca42946236efd20803c06170fc6c61, disabling compactions & flushes 2024-11-16T06:26:56,164 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61. 2024-11-16T06:26:56,164 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 15023cc25d03bb61082803e08a9c5ef3, disabling compactions & flushes 2024-11-16T06:26:56,164 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61. 2024-11-16T06:26:56,164 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3. 2024-11-16T06:26:56,164 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61. after waiting 0 ms 2024-11-16T06:26:56,164 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3. 2024-11-16T06:26:56,164 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61. 2024-11-16T06:26:56,164 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3. after waiting 0 ms 2024-11-16T06:26:56,164 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61. 
2024-11-16T06:26:56,164 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3. 2024-11-16T06:26:56,164 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3. 2024-11-16T06:26:56,164 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for 28ca42946236efd20803c06170fc6c61: Waiting for close lock at 1731738416164Disabling compacts and flushes for region at 1731738416164Disabling writes for close at 1731738416164Writing region close event to WAL at 1731738416164Closed at 1731738416164 2024-11-16T06:26:56,164 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 15023cc25d03bb61082803e08a9c5ef3: Waiting for close lock at 1731738416164Disabling compacts and flushes for region at 1731738416164Disabling writes for close at 1731738416164Writing region close event to WAL at 1731738416164Closed at 1731738416164 2024-11-16T06:26:56,165 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T06:26:56,165 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1731738416165"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731738416165"}]},"ts":"1731738416165"} 2024-11-16T06:26:56,165 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1731738416165"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731738416165"}]},"ts":"1731738416165"} 2024-11-16T06:26:56,167 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-11-16T06:26:56,168 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T06:26:56,168 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738416168"}]},"ts":"1731738416168"} 2024-11-16T06:26:56,169 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-16T06:26:56,170 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {3456ee6a3164=0} racks are {/default-rack=0} 2024-11-16T06:26:56,171 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-16T06:26:56,171 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-16T06:26:56,171 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-16T06:26:56,171 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-16T06:26:56,171 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-16T06:26:56,171 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-16T06:26:56,171 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-16T06:26:56,171 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-16T06:26:56,171 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-16T06:26:56,171 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-16T06:26:56,171 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=28ca42946236efd20803c06170fc6c61, ASSIGN}, {pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=15023cc25d03bb61082803e08a9c5ef3, ASSIGN}] 2024-11-16T06:26:56,172 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=15023cc25d03bb61082803e08a9c5ef3, ASSIGN 2024-11-16T06:26:56,173 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=28ca42946236efd20803c06170fc6c61, ASSIGN 2024-11-16T06:26:56,173 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=15023cc25d03bb61082803e08a9c5ef3, ASSIGN; state=OFFLINE, location=3456ee6a3164,35939,1731738278639; forceNewPlan=false, retain=false 2024-11-16T06:26:56,173 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=28ca42946236efd20803c06170fc6c61, ASSIGN; state=OFFLINE, location=3456ee6a3164,37533,1731738278939; forceNewPlan=false, retain=false 2024-11-16T06:26:56,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-16T06:26:56,324 INFO [3456ee6a3164:39381 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-16T06:26:56,325 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=15023cc25d03bb61082803e08a9c5ef3, regionState=OPENING, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:26:56,325 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=143 updating hbase:meta row=28ca42946236efd20803c06170fc6c61, regionState=OPENING, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:26:56,330 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=28ca42946236efd20803c06170fc6c61, ASSIGN because future has completed 2024-11-16T06:26:56,330 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=145, ppid=143, state=RUNNABLE, hasLock=false; OpenRegionProcedure 28ca42946236efd20803c06170fc6c61, server=3456ee6a3164,37533,1731738278939}] 2024-11-16T06:26:56,332 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=15023cc25d03bb61082803e08a9c5ef3, ASSIGN because future has completed 2024-11-16T06:26:56,333 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=146, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 15023cc25d03bb61082803e08a9c5ef3, server=3456ee6a3164,35939,1731738278639}] 2024-11-16T06:26:56,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-16T06:26:56,488 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61. 2024-11-16T06:26:56,488 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7752): Opening region: {ENCODED => 28ca42946236efd20803c06170fc6c61, NAME => 'testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61.', STARTKEY => '', ENDKEY => '1'} 2024-11-16T06:26:56,488 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61. service=AccessControlService 2024-11-16T06:26:56,488 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-16T06:26:56,489 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 28ca42946236efd20803c06170fc6c61 2024-11-16T06:26:56,489 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:26:56,489 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7794): checking encryption for 28ca42946236efd20803c06170fc6c61 2024-11-16T06:26:56,489 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7797): checking classloading for 28ca42946236efd20803c06170fc6c61 2024-11-16T06:26:56,490 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3. 2024-11-16T06:26:56,490 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7752): Opening region: {ENCODED => 15023cc25d03bb61082803e08a9c5ef3, NAME => 'testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3.', STARTKEY => '1', ENDKEY => ''} 2024-11-16T06:26:56,490 INFO [StoreOpener-28ca42946236efd20803c06170fc6c61-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 28ca42946236efd20803c06170fc6c61 2024-11-16T06:26:56,491 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3. service=AccessControlService 2024-11-16T06:26:56,491 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-16T06:26:56,491 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 15023cc25d03bb61082803e08a9c5ef3 2024-11-16T06:26:56,491 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:26:56,491 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7794): checking encryption for 15023cc25d03bb61082803e08a9c5ef3 2024-11-16T06:26:56,491 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7797): checking classloading for 15023cc25d03bb61082803e08a9c5ef3 2024-11-16T06:26:56,492 INFO [StoreOpener-28ca42946236efd20803c06170fc6c61-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 28ca42946236efd20803c06170fc6c61 columnFamilyName cf 2024-11-16T06:26:56,493 INFO [StoreOpener-15023cc25d03bb61082803e08a9c5ef3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 15023cc25d03bb61082803e08a9c5ef3 2024-11-16T06:26:56,493 DEBUG [StoreOpener-28ca42946236efd20803c06170fc6c61-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:26:56,494 INFO [StoreOpener-15023cc25d03bb61082803e08a9c5ef3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 15023cc25d03bb61082803e08a9c5ef3 columnFamilyName cf 2024-11-16T06:26:56,494 INFO [StoreOpener-28ca42946236efd20803c06170fc6c61-1 {}] regionserver.HStore(327): Store=28ca42946236efd20803c06170fc6c61/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:26:56,494 DEBUG 
[RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1038): replaying wal for 28ca42946236efd20803c06170fc6c61 2024-11-16T06:26:56,494 DEBUG [StoreOpener-15023cc25d03bb61082803e08a9c5ef3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:26:56,495 INFO [StoreOpener-15023cc25d03bb61082803e08a9c5ef3-1 {}] regionserver.HStore(327): Store=15023cc25d03bb61082803e08a9c5ef3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:26:56,495 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportExpiredSnapshot/28ca42946236efd20803c06170fc6c61 2024-11-16T06:26:56,495 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportExpiredSnapshot/28ca42946236efd20803c06170fc6c61 2024-11-16T06:26:56,495 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1038): replaying wal for 15023cc25d03bb61082803e08a9c5ef3 2024-11-16T06:26:56,495 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1048): stopping wal replay for 28ca42946236efd20803c06170fc6c61 2024-11-16T06:26:56,496 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1060): Cleaning up temporary data for 28ca42946236efd20803c06170fc6c61 2024-11-16T06:26:56,496 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportExpiredSnapshot/15023cc25d03bb61082803e08a9c5ef3 2024-11-16T06:26:56,496 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportExpiredSnapshot/15023cc25d03bb61082803e08a9c5ef3 2024-11-16T06:26:56,496 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1048): stopping wal replay for 15023cc25d03bb61082803e08a9c5ef3 2024-11-16T06:26:56,496 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1060): Cleaning up temporary data for 15023cc25d03bb61082803e08a9c5ef3 2024-11-16T06:26:56,497 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1093): writing seq id for 28ca42946236efd20803c06170fc6c61 2024-11-16T06:26:56,497 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1093): writing seq id for 15023cc25d03bb61082803e08a9c5ef3 2024-11-16T06:26:56,499 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 
{event_type=M_RS_OPEN_REGION, pid=145}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportExpiredSnapshot/28ca42946236efd20803c06170fc6c61/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:26:56,499 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportExpiredSnapshot/15023cc25d03bb61082803e08a9c5ef3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:26:56,499 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1114): Opened 15023cc25d03bb61082803e08a9c5ef3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66065448, jitterRate=-0.015548110008239746}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:26:56,499 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 15023cc25d03bb61082803e08a9c5ef3 2024-11-16T06:26:56,499 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1114): Opened 28ca42946236efd20803c06170fc6c61; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68187008, jitterRate=0.016065597534179688}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:26:56,499 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 28ca42946236efd20803c06170fc6c61 2024-11-16T06:26:56,500 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1006): Region open journal for 28ca42946236efd20803c06170fc6c61: Running coprocessor pre-open hook at 1731738416489Writing region info on filesystem at 1731738416489Initializing all the Stores at 1731738416490 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738416490Cleaning up temporary data from old regions at 1731738416496 (+6 ms)Running coprocessor post-open hooks at 1731738416499 (+3 ms)Region opened successfully at 1731738416500 (+1 ms) 2024-11-16T06:26:56,500 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1006): Region open journal for 15023cc25d03bb61082803e08a9c5ef3: Running coprocessor pre-open hook at 1731738416491Writing region info on filesystem at 1731738416491Initializing all the Stores at 1731738416492 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738416492Cleaning up temporary data from old regions at 1731738416496 (+4 ms)Running coprocessor post-open hooks at 1731738416499 (+3 ms)Region opened successfully at 1731738416500 (+1 ms) 2024-11-16T06:26:56,501 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3., pid=146, masterSystemTime=1731738416487 2024-11-16T06:26:56,501 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61., pid=145, masterSystemTime=1731738416485 2024-11-16T06:26:56,502 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3. 2024-11-16T06:26:56,502 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3. 2024-11-16T06:26:56,503 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=15023cc25d03bb61082803e08a9c5ef3, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:26:56,504 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61. 2024-11-16T06:26:56,504 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61. 
2024-11-16T06:26:56,504 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=143 updating hbase:meta row=28ca42946236efd20803c06170fc6c61, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:26:56,505 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=146, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 15023cc25d03bb61082803e08a9c5ef3, server=3456ee6a3164,35939,1731738278639 because future has completed 2024-11-16T06:26:56,506 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=145, ppid=143, state=RUNNABLE, hasLock=false; OpenRegionProcedure 28ca42946236efd20803c06170fc6c61, server=3456ee6a3164,37533,1731738278939 because future has completed 2024-11-16T06:26:56,507 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=146, resume processing ppid=144 2024-11-16T06:26:56,508 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, ppid=144, state=SUCCESS, hasLock=false; OpenRegionProcedure 15023cc25d03bb61082803e08a9c5ef3, server=3456ee6a3164,35939,1731738278639 in 173 msec 2024-11-16T06:26:56,509 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=145, resume processing ppid=143 2024-11-16T06:26:56,509 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=142, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=15023cc25d03bb61082803e08a9c5ef3, ASSIGN in 336 msec 2024-11-16T06:26:56,509 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=143, state=SUCCESS, hasLock=false; OpenRegionProcedure 28ca42946236efd20803c06170fc6c61, server=3456ee6a3164,37533,1731738278939 in 177 msec 2024-11-16T06:26:56,510 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=143, resume processing ppid=142 2024-11-16T06:26:56,510 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, ppid=142, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=28ca42946236efd20803c06170fc6c61, ASSIGN in 338 msec 2024-11-16T06:26:56,511 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T06:26:56,511 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738416511"}]},"ts":"1731738416511"} 2024-11-16T06:26:56,512 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-16T06:26:56,513 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T06:26:56,514 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-11-16T06:26:56,517 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-16T06:26:56,519 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:56,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:56,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:56,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:26:56,523 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:56,523 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:56,523 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:56,523 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:56,523 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:56,523 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:56,523 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:56,523 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:26:56,524 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 375 msec 2024-11-16T06:26:56,775 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-16T06:26:56,776 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 2024-11-16T06:26:56,777 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-16T06:26:56,782 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-11-16T06:26:56,782 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61. 2024-11-16T06:26:56,782 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T06:26:56,785 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-16T06:26:56,794 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-16T06:26:56,801 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-16T06:26:56,812 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37533 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61. with WAL disabled. Data may be lost in the event of a crash. 2024-11-16T06:26:56,813 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35939 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3. with WAL disabled. Data may be lost in the event of a crash. 2024-11-16T06:26:56,814 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-16T06:26:56,818 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-11-16T06:26:56,818 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61. 
2024-11-16T06:26:56,818 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T06:26:56,820 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-16T06:26:56,825 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-16T06:26:56,830 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-16T06:26:56,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-16T06:26:56,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-16T06:26:56,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1992954f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:56,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:26:56,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:26:56,832 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:26:56,832 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:26:56,832 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:26:56,832 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@165f3a4c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:56,832 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:26:56,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:26:56,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:56,833 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58524, version=4.0.0-alpha-1-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:26:56,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e130324, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:56,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:26:56,835 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:26:56,835 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:26:56,836 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45170, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:26:56,837 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 2024-11-16T06:26:56,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:26:56,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:56,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:56,837 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-16T06:26:56,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d9912c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:56,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:26:56,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:26:56,839 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:26:56,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:26:56,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:26:56,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69bb1f93, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:56,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:26:56,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:26:56,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:56,840 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58542, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:26:56,840 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16b3dd5a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:26:56,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:26:56,841 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:26:56,842 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:26:56,844 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45178, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-16T06:26:56,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338., hostname=3456ee6a3164,35939,1731738278639, seqNum=2] 2024-11-16T06:26:56,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:26:56,846 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55276, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:26:56,847 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 2024-11-16T06:26:56,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:26:56,847 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:56,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:26:56,847 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:26:56,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-16T06:26:56,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-16T06:26:56,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=147, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-16T06:26:56,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 147 2024-11-16T06:26:56,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-16T06:26:56,851 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-11-16T06:26:56,852 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-16T06:26:56,854 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-16T06:26:56,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742193_1369 (size=152) 2024-11-16T06:26:56,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742193_1369 (size=152) 2024-11-16T06:26:56,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742193_1369 (size=152) 2024-11-16T06:26:56,860 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-16T06:26:56,860 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 28ca42946236efd20803c06170fc6c61}, {pid=149, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 15023cc25d03bb61082803e08a9c5ef3}] 2024-11-16T06:26:56,861 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 15023cc25d03bb61082803e08a9c5ef3 2024-11-16T06:26:56,861 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 28ca42946236efd20803c06170fc6c61 2024-11-16T06:26:56,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-16T06:26:57,014 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=148 2024-11-16T06:26:57,014 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=149 2024-11-16T06:26:57,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3. 2024-11-16T06:26:57,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61. 
2024-11-16T06:26:57,015 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegion(2902): Flushing 28ca42946236efd20803c06170fc6c61 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-16T06:26:57,015 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegion(2902): Flushing 15023cc25d03bb61082803e08a9c5ef3 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-16T06:26:57,036 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241116ccf881f3e7eb46fa8af49db02fa6eb47_28ca42946236efd20803c06170fc6c61 is 71, key is 0421dc6a0a48cc0b9654eb74579de1b5/cf:q/1731738416811/Put/seqid=0 2024-11-16T06:26:57,038 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241116af760f2c752048e3816066e0304e45ab_15023cc25d03bb61082803e08a9c5ef3 is 71, key is 12264f405be6288c00e75b15ee25b334/cf:q/1731738416813/Put/seqid=0 2024-11-16T06:26:57,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742194_1370 (size=5172) 2024-11-16T06:26:57,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742194_1370 (size=5172) 2024-11-16T06:26:57,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742194_1370 (size=5172) 2024-11-16T06:26:57,043 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:26:57,046 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241116ccf881f3e7eb46fa8af49db02fa6eb47_28ca42946236efd20803c06170fc6c61 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241116ccf881f3e7eb46fa8af49db02fa6eb47_28ca42946236efd20803c06170fc6c61 2024-11-16T06:26:57,047 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportExpiredSnapshot/28ca42946236efd20803c06170fc6c61/.tmp/cf/a08c1a1234d444f3b230dd9f3d2176ce, store: [table=testExportExpiredSnapshot family=cf region=28ca42946236efd20803c06170fc6c61] 2024-11-16T06:26:57,047 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportExpiredSnapshot/28ca42946236efd20803c06170fc6c61/.tmp/cf/a08c1a1234d444f3b230dd9f3d2176ce is 202, key is 0e03f746086b2aa70da71d102b62c473e/cf:q/1731738416811/Put/seqid=0 2024-11-16T06:26:57,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742195_1371 (size=8101) 2024-11-16T06:26:57,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742195_1371 (size=8101) 2024-11-16T06:26:57,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742195_1371 (size=8101) 2024-11-16T06:26:57,051 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:26:57,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742196_1372 (size=6088) 2024-11-16T06:26:57,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742196_1372 (size=6088) 2024-11-16T06:26:57,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742196_1372 (size=6088) 2024-11-16T06:26:57,052 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportExpiredSnapshot/28ca42946236efd20803c06170fc6c61/.tmp/cf/a08c1a1234d444f3b230dd9f3d2176ce 2024-11-16T06:26:57,055 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241116af760f2c752048e3816066e0304e45ab_15023cc25d03bb61082803e08a9c5ef3 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b20241116af760f2c752048e3816066e0304e45ab_15023cc25d03bb61082803e08a9c5ef3 2024-11-16T06:26:57,056 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportExpiredSnapshot/15023cc25d03bb61082803e08a9c5ef3/.tmp/cf/9375215c87d04e9db8023e9c0bf8e422, store: [table=testExportExpiredSnapshot family=cf region=15023cc25d03bb61082803e08a9c5ef3] 2024-11-16T06:26:57,056 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportExpiredSnapshot/15023cc25d03bb61082803e08a9c5ef3/.tmp/cf/9375215c87d04e9db8023e9c0bf8e422 is 202, key is 1d01f3af0c9af2e6f4e2d4b1f248c87b1/cf:q/1731738416813/Put/seqid=0 2024-11-16T06:26:57,056 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportExpiredSnapshot/28ca42946236efd20803c06170fc6c61/.tmp/cf/a08c1a1234d444f3b230dd9f3d2176ce as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportExpiredSnapshot/28ca42946236efd20803c06170fc6c61/cf/a08c1a1234d444f3b230dd9f3d2176ce 2024-11-16T06:26:57,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742197_1373 (size=14463) 2024-11-16T06:26:57,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742197_1373 (size=14463) 2024-11-16T06:26:57,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742197_1373 (size=14463) 2024-11-16T06:26:57,061 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportExpiredSnapshot/28ca42946236efd20803c06170fc6c61/cf/a08c1a1234d444f3b230dd9f3d2176ce, entries=4, sequenceid=5, filesize=5.9 K 2024-11-16T06:26:57,062 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportExpiredSnapshot/15023cc25d03bb61082803e08a9c5ef3/.tmp/cf/9375215c87d04e9db8023e9c0bf8e422 2024-11-16T06:26:57,062 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 28ca42946236efd20803c06170fc6c61 in 47ms, sequenceid=5, compaction requested=false 2024-11-16T06:26:57,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-11-16T06:26:57,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegion(2603): Flush status journal for 28ca42946236efd20803c06170fc6c61: 2024-11-16T06:26:57,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61. for snapshot-testExportExpiredSnapshot completed. 2024-11-16T06:26:57,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-16T06:26:57,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:26:57,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportExpiredSnapshot/28ca42946236efd20803c06170fc6c61/cf/a08c1a1234d444f3b230dd9f3d2176ce] hfiles 2024-11-16T06:26:57,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportExpiredSnapshot/28ca42946236efd20803c06170fc6c61/cf/a08c1a1234d444f3b230dd9f3d2176ce for snapshot=snapshot-testExportExpiredSnapshot 2024-11-16T06:26:57,067 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportExpiredSnapshot/15023cc25d03bb61082803e08a9c5ef3/.tmp/cf/9375215c87d04e9db8023e9c0bf8e422 as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportExpiredSnapshot/15023cc25d03bb61082803e08a9c5ef3/cf/9375215c87d04e9db8023e9c0bf8e422 2024-11-16T06:26:57,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742198_1374 (size=103) 2024-11-16T06:26:57,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742198_1374 (size=103) 2024-11-16T06:26:57,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742198_1374 (size=103) 2024-11-16T06:26:57,069 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61. 
2024-11-16T06:26:57,069 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=148 2024-11-16T06:26:57,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=148 2024-11-16T06:26:57,069 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 28ca42946236efd20803c06170fc6c61 2024-11-16T06:26:57,069 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 28ca42946236efd20803c06170fc6c61 2024-11-16T06:26:57,072 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, ppid=147, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 28ca42946236efd20803c06170fc6c61 in 210 msec 2024-11-16T06:26:57,073 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportExpiredSnapshot/15023cc25d03bb61082803e08a9c5ef3/cf/9375215c87d04e9db8023e9c0bf8e422, entries=46, sequenceid=5, filesize=14.1 K 2024-11-16T06:26:57,074 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 15023cc25d03bb61082803e08a9c5ef3 in 58ms, sequenceid=5, compaction requested=false 2024-11-16T06:26:57,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegion(2603): Flush status journal for 15023cc25d03bb61082803e08a9c5ef3: 2024-11-16T06:26:57,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3. for snapshot-testExportExpiredSnapshot completed. 2024-11-16T06:26:57,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-16T06:26:57,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:26:57,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportExpiredSnapshot/15023cc25d03bb61082803e08a9c5ef3/cf/9375215c87d04e9db8023e9c0bf8e422] hfiles 2024-11-16T06:26:57,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportExpiredSnapshot/15023cc25d03bb61082803e08a9c5ef3/cf/9375215c87d04e9db8023e9c0bf8e422 for snapshot=snapshot-testExportExpiredSnapshot 2024-11-16T06:26:57,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742199_1375 (size=103) 2024-11-16T06:26:57,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742199_1375 (size=103) 2024-11-16T06:26:57,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742199_1375 (size=103) 2024-11-16T06:26:57,080 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3. 
2024-11-16T06:26:57,080 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-11-16T06:26:57,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=149 2024-11-16T06:26:57,081 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 15023cc25d03bb61082803e08a9c5ef3 2024-11-16T06:26:57,081 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 15023cc25d03bb61082803e08a9c5ef3 2024-11-16T06:26:57,083 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=149, resume processing ppid=147 2024-11-16T06:26:57,083 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=147, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 15023cc25d03bb61082803e08a9c5ef3 in 222 msec 2024-11-16T06:26:57,083 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-16T06:26:57,084 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-16T06:26:57,085 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-16T06:26:57,086 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-16T06:26:57,086 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:26:57,087 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b20241116af760f2c752048e3816066e0304e45ab_15023cc25d03bb61082803e08a9c5ef3, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241116ccf881f3e7eb46fa8af49db02fa6eb47_28ca42946236efd20803c06170fc6c61] hfiles 2024-11-16T06:26:57,087 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b20241116af760f2c752048e3816066e0304e45ab_15023cc25d03bb61082803e08a9c5ef3 2024-11-16T06:26:57,087 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241116ccf881f3e7eb46fa8af49db02fa6eb47_28ca42946236efd20803c06170fc6c61 2024-11-16T06:26:57,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742200_1376 (size=287) 2024-11-16T06:26:57,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742200_1376 (size=287) 2024-11-16T06:26:57,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742200_1376 (size=287) 2024-11-16T06:26:57,094 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-16T06:26:57,094 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-11-16T06:26:57,095 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-11-16T06:26:57,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742201_1377 (size=935) 2024-11-16T06:26:57,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742201_1377 (size=935) 2024-11-16T06:26:57,106 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742201_1377 (size=935) 2024-11-16T06:26:57,111 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-16T06:26:57,116 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-16T06:26:57,116 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-11-16T06:26:57,118 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-11-16T06:26:57,118 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 147 2024-11-16T06:26:57,119 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 269 msec 2024-11-16T06:26:57,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-16T06:26:57,165 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot completed 2024-11-16T06:26:57,615 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0006_000001 (auth:SIMPLE) from 127.0.0.1:52778 2024-11-16T06:26:57,627 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_2/usercache/jenkins/appcache/application_1731738285782_0006/container_1731738285782_0006_01_000001/launch_container.sh] 2024-11-16T06:26:57,627 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_2/usercache/jenkins/appcache/application_1731738285782_0006/container_1731738285782_0006_01_000001/container_tokens] 2024-11-16T06:26:57,627 WARN 
[ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_2/usercache/jenkins/appcache/application_1731738285782_0006/container_1731738285782_0006_01_000001/sysfs] 2024-11-16T06:26:58,275 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-11-16T06:26:58,275 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-11-16T06:26:58,277 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-16T06:26:58,277 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-11-16T06:26:58,279 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-16T06:26:58,280 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-16T06:26:59,029 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-16T06:27:06,746 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-16T06:27:07,172 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738427172 2024-11-16T06:27:07,172 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:36767, tgtDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738427172, rawTgtDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738427172, srcFsUri=hdfs://localhost:36767, srcDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:27:07,200 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:36767, inputRoot=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:27:07,200 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2111949037_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738427172, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738427172/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-11-16T06:27:07,202 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-16T06:27:07,203 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:951) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1096) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:522) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:314) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T06:27:07,205 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-11-16T06:27:07,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=150, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-16T06:27:07,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-16T06:27:07,208 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738427208"}]},"ts":"1731738427208"} 2024-11-16T06:27:07,211 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-11-16T06:27:07,211 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-11-16T06:27:07,212 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-11-16T06:27:07,214 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=151f6e94f7574f3a28f05b933028ac52, UNASSIGN}, {pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0d24763c14f02ee7f64a21bcb825ac8e, UNASSIGN}] 2024-11-16T06:27:07,216 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0d24763c14f02ee7f64a21bcb825ac8e, UNASSIGN 2024-11-16T06:27:07,216 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=151f6e94f7574f3a28f05b933028ac52, UNASSIGN 2024-11-16T06:27:07,217 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=152 updating hbase:meta row=151f6e94f7574f3a28f05b933028ac52, regionState=CLOSING, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:27:07,217 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=0d24763c14f02ee7f64a21bcb825ac8e, regionState=CLOSING, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:27:07,220 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0d24763c14f02ee7f64a21bcb825ac8e, UNASSIGN because future has completed 2024-11-16T06:27:07,220 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-16T06:27:07,220 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0d24763c14f02ee7f64a21bcb825ac8e, server=3456ee6a3164,35939,1731738278639}] 2024-11-16T06:27:07,221 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=151f6e94f7574f3a28f05b933028ac52, UNASSIGN because future has completed 2024-11-16T06:27:07,228 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-16T06:27:07,228 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=155, ppid=152, state=RUNNABLE, hasLock=false; CloseRegionProcedure 151f6e94f7574f3a28f05b933028ac52, server=3456ee6a3164,37533,1731738278939}] 2024-11-16T06:27:07,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-16T06:27:07,380 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(122): Close 0d24763c14f02ee7f64a21bcb825ac8e 2024-11-16T06:27:07,380 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-16T06:27:07,380 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1722): Closing 0d24763c14f02ee7f64a21bcb825ac8e, disabling compactions & flushes 2024-11-16T06:27:07,380 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e. 2024-11-16T06:27:07,380 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e. 2024-11-16T06:27:07,380 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e. after waiting 0 ms 2024-11-16T06:27:07,380 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e. 
2024-11-16T06:27:07,381 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(122): Close 151f6e94f7574f3a28f05b933028ac52 2024-11-16T06:27:07,381 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-16T06:27:07,381 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1722): Closing 151f6e94f7574f3a28f05b933028ac52, disabling compactions & flushes 2024-11-16T06:27:07,381 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52. 2024-11-16T06:27:07,381 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52. 2024-11-16T06:27:07,381 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52. after waiting 0 ms 2024-11-16T06:27:07,381 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52. 2024-11-16T06:27:07,389 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/151f6e94f7574f3a28f05b933028ac52/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T06:27:07,390 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:27:07,390 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52. 
2024-11-16T06:27:07,390 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1676): Region close journal for 151f6e94f7574f3a28f05b933028ac52: Waiting for close lock at 1731738427381Running coprocessor pre-close hooks at 1731738427381Disabling compacts and flushes for region at 1731738427381Disabling writes for close at 1731738427381Writing region close event to WAL at 1731738427382 (+1 ms)Running coprocessor post-close hooks at 1731738427390 (+8 ms)Closed at 1731738427390 2024-11-16T06:27:07,392 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/0d24763c14f02ee7f64a21bcb825ac8e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T06:27:07,393 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(157): Closed 151f6e94f7574f3a28f05b933028ac52 2024-11-16T06:27:07,393 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:27:07,394 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e. 2024-11-16T06:27:07,394 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1676): Region close journal for 0d24763c14f02ee7f64a21bcb825ac8e: Waiting for close lock at 1731738427380Running coprocessor pre-close hooks at 1731738427380Disabling compacts and flushes for region at 1731738427380Disabling writes for close at 1731738427380Writing region close event to WAL at 1731738427382 (+2 ms)Running coprocessor post-close hooks at 1731738427393 (+11 ms)Closed at 1731738427394 (+1 ms) 2024-11-16T06:27:07,394 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=152 updating hbase:meta row=151f6e94f7574f3a28f05b933028ac52, regionState=CLOSED 2024-11-16T06:27:07,399 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(157): Closed 0d24763c14f02ee7f64a21bcb825ac8e 2024-11-16T06:27:07,399 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=155, ppid=152, state=RUNNABLE, hasLock=false; CloseRegionProcedure 151f6e94f7574f3a28f05b933028ac52, server=3456ee6a3164,37533,1731738278939 because future has completed 2024-11-16T06:27:07,400 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=0d24763c14f02ee7f64a21bcb825ac8e, regionState=CLOSED 2024-11-16T06:27:07,404 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=153, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0d24763c14f02ee7f64a21bcb825ac8e, server=3456ee6a3164,35939,1731738278639 because future has completed 2024-11-16T06:27:07,406 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=155, resume processing ppid=152 2024-11-16T06:27:07,406 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, ppid=152, state=SUCCESS, hasLock=false; 
CloseRegionProcedure 151f6e94f7574f3a28f05b933028ac52, server=3456ee6a3164,37533,1731738278939 in 173 msec 2024-11-16T06:27:07,408 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=153 2024-11-16T06:27:07,408 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=153, state=SUCCESS, hasLock=false; CloseRegionProcedure 0d24763c14f02ee7f64a21bcb825ac8e, server=3456ee6a3164,35939,1731738278639 in 185 msec 2024-11-16T06:27:07,410 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, ppid=151, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=151f6e94f7574f3a28f05b933028ac52, UNASSIGN in 192 msec 2024-11-16T06:27:07,412 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=153, resume processing ppid=151 2024-11-16T06:27:07,412 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=151, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0d24763c14f02ee7f64a21bcb825ac8e, UNASSIGN in 194 msec 2024-11-16T06:27:07,416 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=151, resume processing ppid=150 2024-11-16T06:27:07,416 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=151, ppid=150, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 201 msec 2024-11-16T06:27:07,422 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738427421"}]},"ts":"1731738427421"} 2024-11-16T06:27:07,424 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-11-16T06:27:07,424 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-11-16T06:27:07,427 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=150, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 220 msec 2024-11-16T06:27:07,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-16T06:27:07,525 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-16T06:27:07,525 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-11-16T06:27:07,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=156, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-16T06:27:07,527 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=156, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-16T06:27:07,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-11-16T06:27:07,528 DEBUG 
[PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=156, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-16T06:27:07,531 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-11-16T06:27:07,533 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/151f6e94f7574f3a28f05b933028ac52 2024-11-16T06:27:07,533 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/0d24763c14f02ee7f64a21bcb825ac8e 2024-11-16T06:27:07,535 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/0d24763c14f02ee7f64a21bcb825ac8e/cf, FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/0d24763c14f02ee7f64a21bcb825ac8e/recovered.edits] 2024-11-16T06:27:07,535 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/151f6e94f7574f3a28f05b933028ac52/cf, FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/151f6e94f7574f3a28f05b933028ac52/recovered.edits] 2024-11-16T06:27:07,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-16T06:27:07,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-16T06:27:07,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-16T06:27:07,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-16T06:27:07,539 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-16T06:27:07,539 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-16T06:27:07,540 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/151f6e94f7574f3a28f05b933028ac52/cf/dbe55b2590c7463e9dbd1bbc5753b475 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportExpiredSnapshot/151f6e94f7574f3a28f05b933028ac52/cf/dbe55b2590c7463e9dbd1bbc5753b475 2024-11-16T06:27:07,540 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-16T06:27:07,540 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-16T06:27:07,541 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/0d24763c14f02ee7f64a21bcb825ac8e/cf/90d9b9b60b5c4843a74be6eb6376a732 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportExpiredSnapshot/0d24763c14f02ee7f64a21bcb825ac8e/cf/90d9b9b60b5c4843a74be6eb6376a732 2024-11-16T06:27:07,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-16T06:27:07,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:27:07,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-16T06:27:07,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:27:07,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-16T06:27:07,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:27:07,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-16T06:27:07,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:27:07,543 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions 
cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:27:07,543 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:27:07,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=156 2024-11-16T06:27:07,545 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:27:07,545 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:27:07,547 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/151f6e94f7574f3a28f05b933028ac52/recovered.edits/9.seqid to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportExpiredSnapshot/151f6e94f7574f3a28f05b933028ac52/recovered.edits/9.seqid 2024-11-16T06:27:07,548 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/151f6e94f7574f3a28f05b933028ac52 2024-11-16T06:27:07,549 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/0d24763c14f02ee7f64a21bcb825ac8e/recovered.edits/9.seqid to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportExpiredSnapshot/0d24763c14f02ee7f64a21bcb825ac8e/recovered.edits/9.seqid 2024-11-16T06:27:07,550 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportExpiredSnapshot/0d24763c14f02ee7f64a21bcb825ac8e 2024-11-16T06:27:07,550 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-11-16T06:27:07,551 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-11-16T06:27:07,551 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf] 2024-11-16T06:27:07,555 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b202411164dea602b50664b32b6eeddecbe012d2f_0d24763c14f02ee7f64a21bcb825ac8e to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b202411164dea602b50664b32b6eeddecbe012d2f_0d24763c14f02ee7f64a21bcb825ac8e 2024-11-16T06:27:07,558 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241116bcd9df8223c54f10b9541859caa3a641_151f6e94f7574f3a28f05b933028ac52 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241116bcd9df8223c54f10b9541859caa3a641_151f6e94f7574f3a28f05b933028ac52 2024-11-16T06:27:07,559 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-11-16T06:27:07,561 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=156, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-16T06:27:07,564 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-11-16T06:27:07,566 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-11-16T06:27:07,568 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=156, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-16T06:27:07,568 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportExpiredSnapshot' from region states. 
2024-11-16T06:27:07,568 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731738427568"}]},"ts":"9223372036854775807"} 2024-11-16T06:27:07,568 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731738427568"}]},"ts":"9223372036854775807"} 2024-11-16T06:27:07,571 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-16T06:27:07,571 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 151f6e94f7574f3a28f05b933028ac52, NAME => 'testtb-testExportExpiredSnapshot,,1731738414272.151f6e94f7574f3a28f05b933028ac52.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 0d24763c14f02ee7f64a21bcb825ac8e, NAME => 'testtb-testExportExpiredSnapshot,1,1731738414272.0d24763c14f02ee7f64a21bcb825ac8e.', STARTKEY => '1', ENDKEY => ''}] 2024-11-16T06:27:07,572 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportExpiredSnapshot' as deleted. 2024-11-16T06:27:07,572 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731738427572"}]},"ts":"9223372036854775807"} 2024-11-16T06:27:07,575 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-11-16T06:27:07,576 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=156, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-16T06:27:07,578 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 51 msec 2024-11-16T06:27:07,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=156 2024-11-16T06:27:07,655 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportExpiredSnapshot 2024-11-16T06:27:07,655 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-16T06:27:07,670 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-16T06:27:07,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-11-16T06:27:07,675 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" type: DISABLED 2024-11-16T06:27:07,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-11-16T06:27:07,682 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] 
master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-16T06:27:07,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-11-16T06:27:07,707 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=793 (was 801), OpenFileDescriptor=791 (was 805), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=461 (was 527), ProcessCount=16 (was 16), AvailableMemoryMB=981 (was 824) - AvailableMemoryMB LEAK? - 2024-11-16T06:27:07,707 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=793 is superior to 500 2024-11-16T06:27:07,723 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=793, OpenFileDescriptor=791, MaxFileDescriptor=1048576, SystemLoadAverage=461, ProcessCount=16, AvailableMemoryMB=980 2024-11-16T06:27:07,723 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=793 is superior to 500 2024-11-16T06:27:07,725 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T06:27:07,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=157, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-16T06:27:07,727 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T06:27:07,727 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 157 2024-11-16T06:27:07,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-16T06:27:07,729 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T06:27:07,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742202_1378 (size=448) 2024-11-16T06:27:07,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742202_1378 (size=448) 2024-11-16T06:27:07,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742202_1378 
(size=448) 2024-11-16T06:27:07,750 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => dc70c908a8ef2bca6135951fdf7bb226, NAME => 'testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:27:07,754 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 7a242737ed7817de1178937e7926f1a3, NAME => 'testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:27:07,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742203_1379 (size=73) 2024-11-16T06:27:07,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742203_1379 (size=73) 2024-11-16T06:27:07,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742203_1379 (size=73) 2024-11-16T06:27:07,819 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:27:07,819 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing dc70c908a8ef2bca6135951fdf7bb226, disabling compactions & flushes 2024-11-16T06:27:07,819 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226. 2024-11-16T06:27:07,819 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226. 
2024-11-16T06:27:07,819 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226. after waiting 0 ms 2024-11-16T06:27:07,819 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226. 2024-11-16T06:27:07,819 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226. 2024-11-16T06:27:07,819 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for dc70c908a8ef2bca6135951fdf7bb226: Waiting for close lock at 1731738427819Disabling compacts and flushes for region at 1731738427819Disabling writes for close at 1731738427819Writing region close event to WAL at 1731738427819Closed at 1731738427819 2024-11-16T06:27:07,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742204_1380 (size=73) 2024-11-16T06:27:07,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742204_1380 (size=73) 2024-11-16T06:27:07,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742204_1380 (size=73) 2024-11-16T06:27:07,827 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:27:07,827 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 7a242737ed7817de1178937e7926f1a3, disabling compactions & flushes 2024-11-16T06:27:07,827 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3. 2024-11-16T06:27:07,827 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3. 2024-11-16T06:27:07,827 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3. after waiting 0 ms 2024-11-16T06:27:07,827 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3. 2024-11-16T06:27:07,827 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3. 
2024-11-16T06:27:07,827 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 7a242737ed7817de1178937e7926f1a3: Waiting for close lock at 1731738427827Disabling compacts and flushes for region at 1731738427827Disabling writes for close at 1731738427827Writing region close event to WAL at 1731738427827Closed at 1731738427827 2024-11-16T06:27:07,829 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T06:27:07,830 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1731738427829"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731738427829"}]},"ts":"1731738427829"} 2024-11-16T06:27:07,830 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1731738427829"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731738427829"}]},"ts":"1731738427829"} 2024-11-16T06:27:07,833 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-16T06:27:07,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-16T06:27:07,834 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T06:27:07,835 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738427834"}]},"ts":"1731738427834"} 2024-11-16T06:27:07,837 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-11-16T06:27:07,838 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {3456ee6a3164=0} racks are {/default-rack=0} 2024-11-16T06:27:07,840 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-16T06:27:07,840 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-16T06:27:07,840 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-16T06:27:07,840 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-16T06:27:07,840 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-16T06:27:07,840 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-16T06:27:07,840 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-16T06:27:07,840 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-16T06:27:07,840 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-16T06:27:07,840 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of 
racks=1 2024-11-16T06:27:07,840 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dc70c908a8ef2bca6135951fdf7bb226, ASSIGN}, {pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=7a242737ed7817de1178937e7926f1a3, ASSIGN}] 2024-11-16T06:27:07,842 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=7a242737ed7817de1178937e7926f1a3, ASSIGN 2024-11-16T06:27:07,842 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dc70c908a8ef2bca6135951fdf7bb226, ASSIGN 2024-11-16T06:27:07,843 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dc70c908a8ef2bca6135951fdf7bb226, ASSIGN; state=OFFLINE, location=3456ee6a3164,40155,1731738278871; forceNewPlan=false, retain=false 2024-11-16T06:27:07,843 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=7a242737ed7817de1178937e7926f1a3, ASSIGN; state=OFFLINE, location=3456ee6a3164,37533,1731738278939; forceNewPlan=false, retain=false 2024-11-16T06:27:07,994 INFO [3456ee6a3164:39381 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-16T06:27:07,994 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=7a242737ed7817de1178937e7926f1a3, regionState=OPENING, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:27:07,994 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=158 updating hbase:meta row=dc70c908a8ef2bca6135951fdf7bb226, regionState=OPENING, regionLocation=3456ee6a3164,40155,1731738278871 2024-11-16T06:27:07,996 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=7a242737ed7817de1178937e7926f1a3, ASSIGN because future has completed 2024-11-16T06:27:07,997 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7a242737ed7817de1178937e7926f1a3, server=3456ee6a3164,37533,1731738278939}] 2024-11-16T06:27:07,998 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dc70c908a8ef2bca6135951fdf7bb226, ASSIGN because future has completed 2024-11-16T06:27:07,998 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=161, ppid=158, state=RUNNABLE, hasLock=false; OpenRegionProcedure dc70c908a8ef2bca6135951fdf7bb226, server=3456ee6a3164,40155,1731738278871}] 2024-11-16T06:27:08,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-16T06:27:08,153 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3. 2024-11-16T06:27:08,153 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7752): Opening region: {ENCODED => 7a242737ed7817de1178937e7926f1a3, NAME => 'testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3.', STARTKEY => '1', ENDKEY => ''} 2024-11-16T06:27:08,154 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3. service=AccessControlService 2024-11-16T06:27:08,154 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-16T06:27:08,154 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 7a242737ed7817de1178937e7926f1a3 2024-11-16T06:27:08,154 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:27:08,154 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7794): checking encryption for 7a242737ed7817de1178937e7926f1a3 2024-11-16T06:27:08,154 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7797): checking classloading for 7a242737ed7817de1178937e7926f1a3 2024-11-16T06:27:08,156 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226. 2024-11-16T06:27:08,156 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7752): Opening region: {ENCODED => dc70c908a8ef2bca6135951fdf7bb226, NAME => 'testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226.', STARTKEY => '', ENDKEY => '1'} 2024-11-16T06:27:08,156 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226. service=AccessControlService 2024-11-16T06:27:08,157 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-16T06:27:08,157 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState dc70c908a8ef2bca6135951fdf7bb226 2024-11-16T06:27:08,157 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:27:08,157 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7794): checking encryption for dc70c908a8ef2bca6135951fdf7bb226 2024-11-16T06:27:08,157 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7797): checking classloading for dc70c908a8ef2bca6135951fdf7bb226 2024-11-16T06:27:08,161 INFO [StoreOpener-7a242737ed7817de1178937e7926f1a3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7a242737ed7817de1178937e7926f1a3 2024-11-16T06:27:08,163 INFO [StoreOpener-7a242737ed7817de1178937e7926f1a3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7a242737ed7817de1178937e7926f1a3 columnFamilyName cf 2024-11-16T06:27:08,164 DEBUG [StoreOpener-7a242737ed7817de1178937e7926f1a3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:27:08,165 INFO [StoreOpener-7a242737ed7817de1178937e7926f1a3-1 {}] regionserver.HStore(327): Store=7a242737ed7817de1178937e7926f1a3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:27:08,165 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1038): replaying wal for 7a242737ed7817de1178937e7926f1a3 2024-11-16T06:27:08,166 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/7a242737ed7817de1178937e7926f1a3 2024-11-16T06:27:08,166 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/7a242737ed7817de1178937e7926f1a3 2024-11-16T06:27:08,167 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1048): stopping wal replay for 7a242737ed7817de1178937e7926f1a3 2024-11-16T06:27:08,167 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1060): Cleaning up temporary data for 7a242737ed7817de1178937e7926f1a3 2024-11-16T06:27:08,169 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1093): writing seq id for 7a242737ed7817de1178937e7926f1a3 2024-11-16T06:27:08,177 INFO [StoreOpener-dc70c908a8ef2bca6135951fdf7bb226-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region dc70c908a8ef2bca6135951fdf7bb226 2024-11-16T06:27:08,185 INFO [StoreOpener-dc70c908a8ef2bca6135951fdf7bb226-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dc70c908a8ef2bca6135951fdf7bb226 columnFamilyName cf 2024-11-16T06:27:08,185 DEBUG [StoreOpener-dc70c908a8ef2bca6135951fdf7bb226-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:27:08,186 INFO [StoreOpener-dc70c908a8ef2bca6135951fdf7bb226-1 {}] regionserver.HStore(327): Store=dc70c908a8ef2bca6135951fdf7bb226/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:27:08,186 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1038): replaying wal for dc70c908a8ef2bca6135951fdf7bb226 2024-11-16T06:27:08,187 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/dc70c908a8ef2bca6135951fdf7bb226 2024-11-16T06:27:08,188 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/dc70c908a8ef2bca6135951fdf7bb226 2024-11-16T06:27:08,188 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1048): stopping wal replay for 
dc70c908a8ef2bca6135951fdf7bb226 2024-11-16T06:27:08,188 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1060): Cleaning up temporary data for dc70c908a8ef2bca6135951fdf7bb226 2024-11-16T06:27:08,190 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1093): writing seq id for dc70c908a8ef2bca6135951fdf7bb226 2024-11-16T06:27:08,194 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/dc70c908a8ef2bca6135951fdf7bb226/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:27:08,194 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1114): Opened dc70c908a8ef2bca6135951fdf7bb226; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71327219, jitterRate=0.06285838782787323}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:27:08,194 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1122): Running coprocessor post-open hooks for dc70c908a8ef2bca6135951fdf7bb226 2024-11-16T06:27:08,195 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1006): Region open journal for dc70c908a8ef2bca6135951fdf7bb226: Running coprocessor pre-open hook at 1731738428157Writing region info on filesystem at 1731738428157Initializing all the Stores at 1731738428158 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738428158Cleaning up temporary data from old regions at 1731738428188 (+30 ms)Running coprocessor post-open hooks at 1731738428194 (+6 ms)Region opened successfully at 1731738428195 (+1 ms) 2024-11-16T06:27:08,196 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226., pid=161, masterSystemTime=1731738428151 2024-11-16T06:27:08,200 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226. 2024-11-16T06:27:08,200 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226. 
2024-11-16T06:27:08,201 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=158 updating hbase:meta row=dc70c908a8ef2bca6135951fdf7bb226, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,40155,1731738278871 2024-11-16T06:27:08,201 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/7a242737ed7817de1178937e7926f1a3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:27:08,202 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1114): Opened 7a242737ed7817de1178937e7926f1a3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68859632, jitterRate=0.026088476181030273}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:27:08,202 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7a242737ed7817de1178937e7926f1a3 2024-11-16T06:27:08,202 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1006): Region open journal for 7a242737ed7817de1178937e7926f1a3: Running coprocessor pre-open hook at 1731738428154Writing region info on filesystem at 1731738428154Initializing all the Stores at 1731738428156 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738428156Cleaning up temporary data from old regions at 1731738428167 (+11 ms)Running coprocessor post-open hooks at 1731738428202 (+35 ms)Region opened successfully at 1731738428202 2024-11-16T06:27:08,203 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3., pid=160, masterSystemTime=1731738428150 2024-11-16T06:27:08,204 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=161, ppid=158, state=RUNNABLE, hasLock=false; OpenRegionProcedure dc70c908a8ef2bca6135951fdf7bb226, server=3456ee6a3164,40155,1731738278871 because future has completed 2024-11-16T06:27:08,205 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3. 2024-11-16T06:27:08,205 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3. 
2024-11-16T06:27:08,207 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=7a242737ed7817de1178937e7926f1a3, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:27:08,213 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=161, resume processing ppid=158 2024-11-16T06:27:08,213 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, ppid=158, state=SUCCESS, hasLock=false; OpenRegionProcedure dc70c908a8ef2bca6135951fdf7bb226, server=3456ee6a3164,40155,1731738278871 in 207 msec 2024-11-16T06:27:08,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=160, ppid=159, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7a242737ed7817de1178937e7926f1a3, server=3456ee6a3164,37533,1731738278939 because future has completed 2024-11-16T06:27:08,215 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, ppid=157, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dc70c908a8ef2bca6135951fdf7bb226, ASSIGN in 373 msec 2024-11-16T06:27:08,217 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=160, resume processing ppid=159 2024-11-16T06:27:08,217 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, ppid=159, state=SUCCESS, hasLock=false; OpenRegionProcedure 7a242737ed7817de1178937e7926f1a3, server=3456ee6a3164,37533,1731738278939 in 219 msec 2024-11-16T06:27:08,220 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=159, resume processing ppid=157 2024-11-16T06:27:08,220 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=157, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=7a242737ed7817de1178937e7926f1a3, ASSIGN in 377 msec 2024-11-16T06:27:08,220 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T06:27:08,221 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738428220"}]},"ts":"1731738428220"} 2024-11-16T06:27:08,222 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-11-16T06:27:08,223 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T06:27:08,224 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-11-16T06:27:08,227 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-16T06:27:08,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 
2024-11-16T06:27:08,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:27:08,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:27:08,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:27:08,232 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-16T06:27:08,232 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-16T06:27:08,232 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:27:08,232 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:27:08,233 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-16T06:27:08,233 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-16T06:27:08,233 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:27:08,233 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:27:08,235 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 506 msec 2024-11-16T06:27:08,275 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-11-16T06:27:08,275 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-16T06:27:08,276 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-16T06:27:08,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-16T06:27:08,355 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-16T06:27:08,355 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-16T06:27:08,358 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-11-16T06:27:08,358 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226. 2024-11-16T06:27:08,358 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T06:27:08,360 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-16T06:27:08,366 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-16T06:27:08,374 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-16T06:27:08,378 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-16T06:27:08,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731738428378 (current time:1731738428378). 
2024-11-16T06:27:08,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-16T06:27:08,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-16T06:27:08,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-16T06:27:08,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cba14a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:27:08,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:27:08,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:27:08,379 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:27:08,380 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:27:08,380 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:27:08,380 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@345e59ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:27:08,380 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:27:08,380 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:27:08,381 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:27:08,381 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50678, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:27:08,382 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18a568ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:27:08,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:27:08,383 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:27:08,384 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:27:08,385 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35328, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:27:08,386 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 2024-11-16T06:27:08,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:27:08,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:27:08,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:27:08,386 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-16T06:27:08,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@331642b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:27:08,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:27:08,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:27:08,388 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:27:08,388 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:27:08,388 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:27:08,388 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b1d223, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:27:08,388 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:27:08,389 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:27:08,389 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:27:08,390 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50706, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:27:08,391 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c1ac075, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:27:08,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:27:08,392 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:27:08,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:27:08,394 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35334, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-16T06:27:08,396 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338., hostname=3456ee6a3164,35939,1731738278639, seqNum=2] 2024-11-16T06:27:08,396 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:27:08,397 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39628, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:27:08,399 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381. 2024-11-16T06:27:08,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:27:08,399 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:27:08,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:27:08,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-16T06:27:08,400 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:27:08,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-16T06:27:08,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=162, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-16T06:27:08,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 162 2024-11-16T06:27:08,403 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-16T06:27:08,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-16T06:27:08,405 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-16T06:27:08,408 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-16T06:27:08,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742205_1381 (size=185) 2024-11-16T06:27:08,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742205_1381 (size=185) 2024-11-16T06:27:08,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742205_1381 (size=185) 2024-11-16T06:27:08,415 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState 
type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-16T06:27:08,415 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dc70c908a8ef2bca6135951fdf7bb226}, {pid=164, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7a242737ed7817de1178937e7926f1a3}] 2024-11-16T06:27:08,416 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7a242737ed7817de1178937e7926f1a3 2024-11-16T06:27:08,417 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=163, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dc70c908a8ef2bca6135951fdf7bb226 2024-11-16T06:27:08,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-16T06:27:08,568 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=163 2024-11-16T06:27:08,568 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=164 2024-11-16T06:27:08,568 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3. 2024-11-16T06:27:08,568 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226. 2024-11-16T06:27:08,569 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.HRegion(2603): Flush status journal for dc70c908a8ef2bca6135951fdf7bb226: 2024-11-16T06:27:08,569 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.HRegion(2603): Flush status journal for 7a242737ed7817de1178937e7926f1a3: 2024-11-16T06:27:08,569 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-11-16T06:27:08,569 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-11-16T06:27:08,569 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226.' 
region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-16T06:27:08,569 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:27:08,569 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-16T06:27:08,569 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-16T06:27:08,569 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:27:08,569 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-16T06:27:08,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742207_1383 (size=76) 2024-11-16T06:27:08,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742207_1383 (size=76) 2024-11-16T06:27:08,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742207_1383 (size=76) 2024-11-16T06:27:08,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742206_1382 (size=76) 2024-11-16T06:27:08,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742206_1382 (size=76) 2024-11-16T06:27:08,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742206_1382 (size=76) 2024-11-16T06:27:08,581 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226. 2024-11-16T06:27:08,581 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=163 2024-11-16T06:27:08,581 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3. 
2024-11-16T06:27:08,581 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-16T06:27:08,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=163 2024-11-16T06:27:08,581 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region dc70c908a8ef2bca6135951fdf7bb226 2024-11-16T06:27:08,581 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=163, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dc70c908a8ef2bca6135951fdf7bb226 2024-11-16T06:27:08,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=164 2024-11-16T06:27:08,582 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 7a242737ed7817de1178937e7926f1a3 2024-11-16T06:27:08,582 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7a242737ed7817de1178937e7926f1a3 2024-11-16T06:27:08,586 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, ppid=162, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7a242737ed7817de1178937e7926f1a3 in 170 msec 2024-11-16T06:27:08,587 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=163, resume processing ppid=162 2024-11-16T06:27:08,587 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; SnapshotRegionProcedure dc70c908a8ef2bca6135951fdf7bb226 in 170 msec 2024-11-16T06:27:08,587 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-16T06:27:08,588 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-16T06:27:08,589 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-16T06:27:08,589 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-16T06:27:08,589 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:27:08,590 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-16T06:27:08,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742208_1384 (size=68) 2024-11-16T06:27:08,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742208_1384 (size=68) 2024-11-16T06:27:08,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742208_1384 (size=68) 2024-11-16T06:27:08,602 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-16T06:27:08,602 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-11-16T06:27:08,603 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-16T06:27:08,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742209_1385 (size=673) 2024-11-16T06:27:08,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742209_1385 (size=673) 2024-11-16T06:27:08,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742209_1385 (size=673) 2024-11-16T06:27:08,626 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-16T06:27:08,631 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-16T06:27:08,632 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-16T06:27:08,634 INFO [PEWorker-1 {}] 
procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-16T06:27:08,634 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 162 2024-11-16T06:27:08,635 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 233 msec 2024-11-16T06:27:08,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-16T06:27:08,725 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-16T06:27:08,733 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40155 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226. with WAL disabled. Data may be lost in the event of a crash. 2024-11-16T06:27:08,735 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37533 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3. with WAL disabled. Data may be lost in the event of a crash. 2024-11-16T06:27:08,737 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-16T06:27:08,740 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-11-16T06:27:08,740 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226. 
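The entries above trace a FLUSH-type snapshot (emptySnaptb0-testEmptyExportFileSystemState) from the MasterRpcServices request through the SnapshotProcedure states to completion. As an illustrative sketch only — not part of the captured log — the same request can be issued from a client with the public HBase Admin API; the configuration source and class name below are assumptions, while the table and snapshot names mirror the ones in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    // Hypothetical client-side sketch; the test harness itself drives this
    // through HBaseTestingUtil rather than a standalone main class.
    public class SnapshotExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Equivalent to the logged request:
          // { ss=emptySnaptb0-testEmptyExportFileSystemState
          //   table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }
          admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState",
              TableName.valueOf("testtb-testEmptyExportFileSystemState"),
              SnapshotType.FLUSH);
        }
      }
    }

The call blocks until the SnapshotProcedure finishes, which is why the client side of the log simply polls "Checking to see if procedure is done" and then reports "Operation: SNAPSHOT ... completed".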
2024-11-16T06:27:08,740 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T06:27:08,742 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-16T06:27:08,747 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-16T06:27:08,754 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-16T06:27:08,757 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-16T06:27:08,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731738428758 (current time:1731738428758). 2024-11-16T06:27:08,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-16T06:27:08,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-16T06:27:08,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-16T06:27:08,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62c1314a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:27:08,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:27:08,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:27:08,760 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:27:08,760 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:27:08,760 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:27:08,760 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@628b8b07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:27:08,760 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:27:08,761 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:27:08,761 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:27:08,762 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50732, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:27:08,762 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1bf56a4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:27:08,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:27:08,764 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:27:08,764 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:27:08,765 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35350, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:27:08,766 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381. 
2024-11-16T06:27:08,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:27:08,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:27:08,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:27:08,767 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:27:08,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2df6543a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:27:08,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:27:08,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:27:08,768 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:27:08,768 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:27:08,769 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:27:08,769 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24858b79, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:27:08,769 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:27:08,769 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:27:08,769 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:27:08,770 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50752, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:27:08,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26f790f9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:27:08,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:27:08,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:27:08,773 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:27:08,774 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35356, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:27:08,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338., hostname=3456ee6a3164,35939,1731738278639, seqNum=2] 2024-11-16T06:27:08,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:27:08,777 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39638, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:27:08,778 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381. 
2024-11-16T06:27:08,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:27:08,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:27:08,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:27:08,779 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:27:08,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-16T06:27:08,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-16T06:27:08,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=165, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-16T06:27:08,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 165 2024-11-16T06:27:08,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-16T06:27:08,782 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-16T06:27:08,783 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-16T06:27:08,785 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-16T06:27:08,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742210_1386 (size=180) 2024-11-16T06:27:08,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742210_1386 (size=180) 2024-11-16T06:27:08,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742210_1386 (size=180) 2024-11-16T06:27:08,795 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-16T06:27:08,796 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dc70c908a8ef2bca6135951fdf7bb226}, {pid=167, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7a242737ed7817de1178937e7926f1a3}] 2024-11-16T06:27:08,797 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=166, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dc70c908a8ef2bca6135951fdf7bb226 2024-11-16T06:27:08,797 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=167, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
7a242737ed7817de1178937e7926f1a3 2024-11-16T06:27:08,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-16T06:27:08,948 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=166 2024-11-16T06:27:08,948 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=167 2024-11-16T06:27:08,949 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226. 2024-11-16T06:27:08,949 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegion(2902): Flushing dc70c908a8ef2bca6135951fdf7bb226 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-16T06:27:08,949 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3. 2024-11-16T06:27:08,950 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegion(2902): Flushing 7a242737ed7817de1178937e7926f1a3 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-16T06:27:08,979 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241116f62dad01f18741e39194d176c3f5cdf0_7a242737ed7817de1178937e7926f1a3 is 71, key is 136828b060b0da175277e89e04e2afcf/cf:q/1731738428735/Put/seqid=0 2024-11-16T06:27:08,984 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111638971a6256d84190b324eba4c45dbf68_dc70c908a8ef2bca6135951fdf7bb226 is 71, key is 08944b8727daef14c375bf8e5594b3bf/cf:q/1731738428733/Put/seqid=0 2024-11-16T06:27:09,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742211_1387 (size=8241) 2024-11-16T06:27:09,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742211_1387 (size=8241) 2024-11-16T06:27:09,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742211_1387 (size=8241) 2024-11-16T06:27:09,006 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:27:09,011 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241116f62dad01f18741e39194d176c3f5cdf0_7a242737ed7817de1178937e7926f1a3 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241116f62dad01f18741e39194d176c3f5cdf0_7a242737ed7817de1178937e7926f1a3 2024-11-16T06:27:09,013 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/7a242737ed7817de1178937e7926f1a3/.tmp/cf/fa46c184c20b4418896bcd2d0009c0c8, store: [table=testtb-testEmptyExportFileSystemState family=cf region=7a242737ed7817de1178937e7926f1a3] 2024-11-16T06:27:09,013 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/7a242737ed7817de1178937e7926f1a3/.tmp/cf/fa46c184c20b4418896bcd2d0009c0c8 is 214, key is 1ae70568f5e841ce2fff7ea183b70c8ef/cf:q/1731738428735/Put/seqid=0 2024-11-16T06:27:09,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742212_1388 (size=5032) 2024-11-16T06:27:09,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742212_1388 (size=5032) 2024-11-16T06:27:09,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742212_1388 (size=5032) 2024-11-16T06:27:09,032 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:27:09,036 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111638971a6256d84190b324eba4c45dbf68_dc70c908a8ef2bca6135951fdf7bb226 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e2024111638971a6256d84190b324eba4c45dbf68_dc70c908a8ef2bca6135951fdf7bb226 2024-11-16T06:27:09,037 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/dc70c908a8ef2bca6135951fdf7bb226/.tmp/cf/a58248c86ebe4da9943ca0fbbc711976, store: [table=testtb-testEmptyExportFileSystemState family=cf 
region=dc70c908a8ef2bca6135951fdf7bb226] 2024-11-16T06:27:09,038 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/dc70c908a8ef2bca6135951fdf7bb226/.tmp/cf/a58248c86ebe4da9943ca0fbbc711976 is 214, key is 0d9c2aec154755f950f67ebd17d65fa23/cf:q/1731738428733/Put/seqid=0 2024-11-16T06:27:09,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742213_1389 (size=15447) 2024-11-16T06:27:09,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742213_1389 (size=15447) 2024-11-16T06:27:09,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742213_1389 (size=15447) 2024-11-16T06:27:09,059 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/7a242737ed7817de1178937e7926f1a3/.tmp/cf/fa46c184c20b4418896bcd2d0009c0c8 2024-11-16T06:27:09,065 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/7a242737ed7817de1178937e7926f1a3/.tmp/cf/fa46c184c20b4418896bcd2d0009c0c8 as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/7a242737ed7817de1178937e7926f1a3/cf/fa46c184c20b4418896bcd2d0009c0c8 2024-11-16T06:27:09,071 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/7a242737ed7817de1178937e7926f1a3/cf/fa46c184c20b4418896bcd2d0009c0c8, entries=48, sequenceid=6, filesize=15.1 K 2024-11-16T06:27:09,072 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 7a242737ed7817de1178937e7926f1a3 in 122ms, sequenceid=6, compaction requested=false 2024-11-16T06:27:09,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-11-16T06:27:09,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegion(2603): Flush status journal for 7a242737ed7817de1178937e7926f1a3: 2024-11-16T06:27:09,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.SnapshotRegionCallable(75): Snapshotting region 
testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3. for snaptb0-testEmptyExportFileSystemState completed. 2024-11-16T06:27:09,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-16T06:27:09,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:27:09,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/7a242737ed7817de1178937e7926f1a3/cf/fa46c184c20b4418896bcd2d0009c0c8] hfiles 2024-11-16T06:27:09,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/7a242737ed7817de1178937e7926f1a3/cf/fa46c184c20b4418896bcd2d0009c0c8 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-16T06:27:09,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742214_1390 (size=5724) 2024-11-16T06:27:09,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742214_1390 (size=5724) 2024-11-16T06:27:09,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742214_1390 (size=5724) 2024-11-16T06:27:09,077 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/dc70c908a8ef2bca6135951fdf7bb226/.tmp/cf/a58248c86ebe4da9943ca0fbbc711976 2024-11-16T06:27:09,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/dc70c908a8ef2bca6135951fdf7bb226/.tmp/cf/a58248c86ebe4da9943ca0fbbc711976 as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/dc70c908a8ef2bca6135951fdf7bb226/cf/a58248c86ebe4da9943ca0fbbc711976 2024-11-16T06:27:09,093 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/dc70c908a8ef2bca6135951fdf7bb226/cf/a58248c86ebe4da9943ca0fbbc711976, entries=2, sequenceid=6, filesize=5.6 K 2024-11-16T06:27:09,095 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-16T06:27:09,096 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for dc70c908a8ef2bca6135951fdf7bb226 in 146ms, sequenceid=6, compaction requested=false 2024-11-16T06:27:09,096 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegion(2603): Flush status journal for dc70c908a8ef2bca6135951fdf7bb226: 2024-11-16T06:27:09,096 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226. for snaptb0-testEmptyExportFileSystemState completed. 2024-11-16T06:27:09,096 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-16T06:27:09,096 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:27:09,096 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/dc70c908a8ef2bca6135951fdf7bb226/cf/a58248c86ebe4da9943ca0fbbc711976] hfiles 2024-11-16T06:27:09,096 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/dc70c908a8ef2bca6135951fdf7bb226/cf/a58248c86ebe4da9943ca0fbbc711976 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-16T06:27:09,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742215_1391 (size=115) 2024-11-16T06:27:09,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742215_1391 (size=115) 2024-11-16T06:27:09,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742215_1391 (size=115) 2024-11-16T06:27:09,100 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3. 
2024-11-16T06:27:09,101 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=167 2024-11-16T06:27:09,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=167 2024-11-16T06:27:09,101 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 7a242737ed7817de1178937e7926f1a3 2024-11-16T06:27:09,102 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=167, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7a242737ed7817de1178937e7926f1a3 2024-11-16T06:27:09,104 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=165, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7a242737ed7817de1178937e7926f1a3 in 307 msec 2024-11-16T06:27:09,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742216_1392 (size=115) 2024-11-16T06:27:09,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742216_1392 (size=115) 2024-11-16T06:27:09,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742216_1392 (size=115) 2024-11-16T06:27:09,117 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226. 
2024-11-16T06:27:09,117 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-16T06:27:09,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=166 2024-11-16T06:27:09,117 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region dc70c908a8ef2bca6135951fdf7bb226 2024-11-16T06:27:09,119 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=166, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dc70c908a8ef2bca6135951fdf7bb226 2024-11-16T06:27:09,134 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=166, resume processing ppid=165 2024-11-16T06:27:09,134 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=165, state=SUCCESS, hasLock=false; SnapshotRegionProcedure dc70c908a8ef2bca6135951fdf7bb226 in 324 msec 2024-11-16T06:27:09,134 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-16T06:27:09,135 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-16T06:27:09,136 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-16T06:27:09,136 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-16T06:27:09,136 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:27:09,137 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241116f62dad01f18741e39194d176c3f5cdf0_7a242737ed7817de1178937e7926f1a3, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e2024111638971a6256d84190b324eba4c45dbf68_dc70c908a8ef2bca6135951fdf7bb226] hfiles 2024-11-16T06:27:09,137 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241116f62dad01f18741e39194d176c3f5cdf0_7a242737ed7817de1178937e7926f1a3 2024-11-16T06:27:09,137 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e2024111638971a6256d84190b324eba4c45dbf68_dc70c908a8ef2bca6135951fdf7bb226 2024-11-16T06:27:09,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742217_1393 (size=299) 2024-11-16T06:27:09,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742217_1393 (size=299) 2024-11-16T06:27:09,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742217_1393 (size=299) 2024-11-16T06:27:09,148 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-16T06:27:09,148 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-11-16T06:27:09,148 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-11-16T06:27:09,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742218_1394 (size=983) 2024-11-16T06:27:09,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742218_1394 (size=983) 
2024-11-16T06:27:09,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742218_1394 (size=983) 2024-11-16T06:27:09,191 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-16T06:27:09,207 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-16T06:27:09,208 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-11-16T06:27:09,210 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-16T06:27:09,210 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 165 2024-11-16T06:27:09,212 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 430 msec 2024-11-16T06:27:09,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-16T06:27:09,405 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-16T06:27:09,405 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738429405 2024-11-16T06:27:09,405 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:36767, tgtDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738429405, rawTgtDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738429405, srcFsUri=hdfs://localhost:36767, srcDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:27:09,444 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:36767, 
inputRoot=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:27:09,444 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2111949037_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738429405, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738429405/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-16T06:27:09,446 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-16T06:27:09,452 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738429405/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-16T06:27:09,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742219_1395 (size=673) 2024-11-16T06:27:09,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742219_1395 (size=673) 2024-11-16T06:27:09,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742219_1395 (size=673) 2024-11-16T06:27:09,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742220_1396 (size=185) 2024-11-16T06:27:09,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742220_1396 (size=185) 2024-11-16T06:27:09,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742220_1396 (size=185) 2024-11-16T06:27:09,489 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:09,490 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:09,490 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:10,538 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop-2921347914731466410.jar 2024-11-16T06:27:10,538 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:10,538 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:10,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop-13004965904716241084.jar 2024-11-16T06:27:10,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:10,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:10,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:10,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:10,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:10,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:10,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-16T06:27:10,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-16T06:27:10,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-16T06:27:10,604 DEBUG [Time-limited 
test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-16T06:27:10,605 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-16T06:27:10,605 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-16T06:27:10,605 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-16T06:27:10,605 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-16T06:27:10,605 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-16T06:27:10,606 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-16T06:27:10,606 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-16T06:27:10,606 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:27:10,606 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:27:10,607 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:27:10,607 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
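The long run of "For class X, using jar Y" DEBUG lines above comes from TableMapReduceUtil resolving, for each class the export job depends on, the jar that provides it so those jars can be shipped with the MapReduce job (the addStoredBlock lines that follow are those jars being written into HDFS). A short sketch of how a job typically requests this, assuming the hbase-mapreduce API; the job name is illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class ShipDependencyJars {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "example-export-job");
    // Locate the jar backing each class the job needs and add it to the job's
    // classpath/distributed cache; this is what emits the
    // "For class ..., using jar ..." DEBUG lines seen in the log.
    TableMapReduceUtil.addDependencyJars(job);
    // job.waitForCompletion(true) would follow in a real job.
  }
}
```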
2024-11-16T06:27:10,607 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:27:10,607 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:27:10,608 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:27:10,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742221_1397 (size=131440) 2024-11-16T06:27:10,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742221_1397 (size=131440) 2024-11-16T06:27:10,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742221_1397 (size=131440) 2024-11-16T06:27:10,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742222_1398 (size=4188619) 2024-11-16T06:27:10,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742222_1398 (size=4188619) 2024-11-16T06:27:10,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742222_1398 (size=4188619) 2024-11-16T06:27:10,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742223_1399 (size=1323991) 2024-11-16T06:27:10,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742223_1399 (size=1323991) 2024-11-16T06:27:10,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742223_1399 (size=1323991) 2024-11-16T06:27:10,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742224_1400 (size=903732) 2024-11-16T06:27:10,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742224_1400 (size=903732) 2024-11-16T06:27:10,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742224_1400 (size=903732) 2024-11-16T06:27:10,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742225_1401 (size=8360083) 2024-11-16T06:27:10,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742225_1401 (size=8360083) 2024-11-16T06:27:10,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36185 is added to blk_1073742225_1401 (size=8360083) 2024-11-16T06:27:10,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742226_1402 (size=6424738) 2024-11-16T06:27:10,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742226_1402 (size=6424738) 2024-11-16T06:27:10,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742226_1402 (size=6424738) 2024-11-16T06:27:10,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742227_1403 (size=1877034) 2024-11-16T06:27:10,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742227_1403 (size=1877034) 2024-11-16T06:27:10,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742227_1403 (size=1877034) 2024-11-16T06:27:10,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742228_1404 (size=77835) 2024-11-16T06:27:10,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742228_1404 (size=77835) 2024-11-16T06:27:10,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742228_1404 (size=77835) 2024-11-16T06:27:10,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742229_1405 (size=30949) 2024-11-16T06:27:10,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742229_1405 (size=30949) 2024-11-16T06:27:10,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742229_1405 (size=30949) 2024-11-16T06:27:10,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742230_1406 (size=1597327) 2024-11-16T06:27:10,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742230_1406 (size=1597327) 2024-11-16T06:27:10,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742230_1406 (size=1597327) 2024-11-16T06:27:10,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742231_1407 (size=440656) 2024-11-16T06:27:10,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742231_1407 (size=440656) 2024-11-16T06:27:10,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742231_1407 (size=440656) 2024-11-16T06:27:11,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742232_1408 (size=4695811) 2024-11-16T06:27:11,018 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742232_1408 (size=4695811) 2024-11-16T06:27:11,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742232_1408 (size=4695811) 2024-11-16T06:27:11,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742233_1409 (size=232957) 2024-11-16T06:27:11,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742233_1409 (size=232957) 2024-11-16T06:27:11,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742233_1409 (size=232957) 2024-11-16T06:27:11,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742234_1410 (size=127628) 2024-11-16T06:27:11,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742234_1410 (size=127628) 2024-11-16T06:27:11,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742234_1410 (size=127628) 2024-11-16T06:27:11,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742235_1411 (size=20406) 2024-11-16T06:27:11,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742235_1411 (size=20406) 2024-11-16T06:27:11,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742235_1411 (size=20406) 2024-11-16T06:27:11,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742236_1412 (size=5175431) 2024-11-16T06:27:11,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742236_1412 (size=5175431) 2024-11-16T06:27:11,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742236_1412 (size=5175431) 2024-11-16T06:27:11,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742237_1413 (size=217634) 2024-11-16T06:27:11,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742237_1413 (size=217634) 2024-11-16T06:27:11,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742237_1413 (size=217634) 2024-11-16T06:27:11,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742238_1414 (size=1832290) 2024-11-16T06:27:11,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742238_1414 (size=1832290) 2024-11-16T06:27:11,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742238_1414 (size=1832290) 2024-11-16T06:27:11,143 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742239_1415 (size=322274) 2024-11-16T06:27:11,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742239_1415 (size=322274) 2024-11-16T06:27:11,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742239_1415 (size=322274) 2024-11-16T06:27:11,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742240_1416 (size=503880) 2024-11-16T06:27:11,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742240_1416 (size=503880) 2024-11-16T06:27:11,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742240_1416 (size=503880) 2024-11-16T06:27:11,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742241_1417 (size=29229) 2024-11-16T06:27:11,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742241_1417 (size=29229) 2024-11-16T06:27:11,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742241_1417 (size=29229) 2024-11-16T06:27:11,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742242_1418 (size=24096) 2024-11-16T06:27:11,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742242_1418 (size=24096) 2024-11-16T06:27:11,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742242_1418 (size=24096) 2024-11-16T06:27:11,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742243_1419 (size=111872) 2024-11-16T06:27:11,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742243_1419 (size=111872) 2024-11-16T06:27:11,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742243_1419 (size=111872) 2024-11-16T06:27:11,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742244_1420 (size=45609) 2024-11-16T06:27:11,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742244_1420 (size=45609) 2024-11-16T06:27:11,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742244_1420 (size=45609) 2024-11-16T06:27:11,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742245_1421 (size=136454) 2024-11-16T06:27:11,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742245_1421 (size=136454) 2024-11-16T06:27:11,222 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742245_1421 (size=136454) 2024-11-16T06:27:11,223 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-16T06:27:11,225 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-11-16T06:27:11,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742246_1422 (size=7) 2024-11-16T06:27:11,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742246_1422 (size=7) 2024-11-16T06:27:11,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742246_1422 (size=7) 2024-11-16T06:27:11,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742247_1423 (size=10) 2024-11-16T06:27:11,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742247_1423 (size=10) 2024-11-16T06:27:11,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742247_1423 (size=10) 2024-11-16T06:27:11,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742248_1424 (size=303633) 2024-11-16T06:27:11,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742248_1424 (size=303633) 2024-11-16T06:27:11,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742248_1424 (size=303633) 2024-11-16T06:27:11,437 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-16T06:27:11,437 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-16T06:27:11,452 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0007_000001 (auth:SIMPLE) from 127.0.0.1:49604 2024-11-16T06:27:12,941 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-16T06:27:17,202 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0007_000001 (auth:SIMPLE) from 127.0.0.1:40748 2024-11-16T06:27:17,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742249_1425 (size=349259) 2024-11-16T06:27:17,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742249_1425 (size=349259) 2024-11-16T06:27:17,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742249_1425 (size=349259) 2024-11-16T06:27:18,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742250_1426 (size=8568) 2024-11-16T06:27:18,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742250_1426 (size=8568) 2024-11-16T06:27:18,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742250_1426 (size=8568) 2024-11-16T06:27:18,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742251_1427 (size=460) 2024-11-16T06:27:18,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742251_1427 (size=460) 2024-11-16T06:27:18,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742251_1427 (size=460) 2024-11-16T06:27:18,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742252_1428 (size=8568) 2024-11-16T06:27:18,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742252_1428 (size=8568) 2024-11-16T06:27:18,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742252_1428 (size=8568) 2024-11-16T06:27:18,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742253_1429 (size=349259) 2024-11-16T06:27:18,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742253_1429 (size=349259) 2024-11-16T06:27:18,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742253_1429 (size=349259) 2024-11-16T06:27:19,554 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-16T06:27:19,555 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
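The preceding entries trace the ExportSnapshot run end to end: the snapshot manifest is copied to the target directory, dependency jars are staged, the MapReduce copy job runs (trivially, since emptySnaptb0-testEmptyExportFileSystemState references no hfiles), and the export is finalized and re-verified. As a minimal sketch of driving the same tool programmatically, assuming ExportSnapshot's usual Hadoop Tool entry point and its -snapshot/-copy-to options; the destination path is a placeholder, not taken from this log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Roughly equivalent to:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot emptySnaptb0-testEmptyExportFileSystemState -copy-to <dest root>
    int exitCode = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
        "-copy-to", "hdfs://dest-cluster/hbase"  // placeholder destination root
    });
    System.exit(exitCode);
  }
}
```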
2024-11-16T06:27:19,561 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-11-16T06:27:19,561 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-16T06:27:19,562 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-16T06:27:19,562 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2111949037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-16T06:27:19,562 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-16T06:27:19,562 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-11-16T06:27:19,562 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2111949037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738429405/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738429405/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-16T06:27:19,563 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738429405/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-16T06:27:19,563 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738429405/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-11-16T06:27:19,569 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-11-16T06:27:19,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=168, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-16T06:27:19,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-16T06:27:19,574 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738439574"}]},"ts":"1731738439574"} 2024-11-16T06:27:19,577 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-11-16T06:27:19,577 INFO [PEWorker-3 {}] 
procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-11-16T06:27:19,578 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=169, ppid=168, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-11-16T06:27:19,580 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dc70c908a8ef2bca6135951fdf7bb226, UNASSIGN}, {pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=7a242737ed7817de1178937e7926f1a3, UNASSIGN}] 2024-11-16T06:27:19,581 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=7a242737ed7817de1178937e7926f1a3, UNASSIGN 2024-11-16T06:27:19,582 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dc70c908a8ef2bca6135951fdf7bb226, UNASSIGN 2024-11-16T06:27:19,584 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=dc70c908a8ef2bca6135951fdf7bb226, regionState=CLOSING, regionLocation=3456ee6a3164,40155,1731738278871 2024-11-16T06:27:19,584 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=7a242737ed7817de1178937e7926f1a3, regionState=CLOSING, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:27:19,586 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dc70c908a8ef2bca6135951fdf7bb226, UNASSIGN because future has completed 2024-11-16T06:27:19,587 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-16T06:27:19,587 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=172, ppid=170, state=RUNNABLE, hasLock=false; CloseRegionProcedure dc70c908a8ef2bca6135951fdf7bb226, server=3456ee6a3164,40155,1731738278871}] 2024-11-16T06:27:19,588 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=7a242737ed7817de1178937e7926f1a3, UNASSIGN because future has completed 2024-11-16T06:27:19,588 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-16T06:27:19,588 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=171, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7a242737ed7817de1178937e7926f1a3, server=3456ee6a3164,37533,1731738278939}] 2024-11-16T06:27:19,685 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-16T06:27:19,740 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(122): Close dc70c908a8ef2bca6135951fdf7bb226 2024-11-16T06:27:19,740 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-16T06:27:19,740 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1722): Closing dc70c908a8ef2bca6135951fdf7bb226, disabling compactions & flushes 2024-11-16T06:27:19,740 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226. 2024-11-16T06:27:19,741 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226. 2024-11-16T06:27:19,741 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226. after waiting 0 ms 2024-11-16T06:27:19,741 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226. 2024-11-16T06:27:19,748 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] handler.UnassignRegionHandler(122): Close 7a242737ed7817de1178937e7926f1a3 2024-11-16T06:27:19,748 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-16T06:27:19,748 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1722): Closing 7a242737ed7817de1178937e7926f1a3, disabling compactions & flushes 2024-11-16T06:27:19,748 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3. 2024-11-16T06:27:19,748 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3. 2024-11-16T06:27:19,748 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3. after waiting 0 ms 2024-11-16T06:27:19,748 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3. 
2024-11-16T06:27:19,774 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/dc70c908a8ef2bca6135951fdf7bb226/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T06:27:19,775 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:27:19,775 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226. 2024-11-16T06:27:19,775 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1676): Region close journal for dc70c908a8ef2bca6135951fdf7bb226: Waiting for close lock at 1731738439740Running coprocessor pre-close hooks at 1731738439740Disabling compacts and flushes for region at 1731738439740Disabling writes for close at 1731738439741 (+1 ms)Writing region close event to WAL at 1731738439753 (+12 ms)Running coprocessor post-close hooks at 1731738439775 (+22 ms)Closed at 1731738439775 2024-11-16T06:27:19,778 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/7a242737ed7817de1178937e7926f1a3/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T06:27:19,778 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(157): Closed dc70c908a8ef2bca6135951fdf7bb226 2024-11-16T06:27:19,780 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:27:19,780 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3. 
2024-11-16T06:27:19,780 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1676): Region close journal for 7a242737ed7817de1178937e7926f1a3: Waiting for close lock at 1731738439748Running coprocessor pre-close hooks at 1731738439748Disabling compacts and flushes for region at 1731738439748Disabling writes for close at 1731738439748Writing region close event to WAL at 1731738439766 (+18 ms)Running coprocessor post-close hooks at 1731738439780 (+14 ms)Closed at 1731738439780 2024-11-16T06:27:19,781 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=dc70c908a8ef2bca6135951fdf7bb226, regionState=CLOSED 2024-11-16T06:27:19,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=172, ppid=170, state=RUNNABLE, hasLock=false; CloseRegionProcedure dc70c908a8ef2bca6135951fdf7bb226, server=3456ee6a3164,40155,1731738278871 because future has completed 2024-11-16T06:27:19,787 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] handler.UnassignRegionHandler(157): Closed 7a242737ed7817de1178937e7926f1a3 2024-11-16T06:27:19,788 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=7a242737ed7817de1178937e7926f1a3, regionState=CLOSED 2024-11-16T06:27:19,790 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=171, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7a242737ed7817de1178937e7926f1a3, server=3456ee6a3164,37533,1731738278939 because future has completed 2024-11-16T06:27:19,794 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=172, resume processing ppid=170 2024-11-16T06:27:19,795 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, ppid=170, state=SUCCESS, hasLock=false; CloseRegionProcedure dc70c908a8ef2bca6135951fdf7bb226, server=3456ee6a3164,40155,1731738278871 in 201 msec 2024-11-16T06:27:19,797 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dc70c908a8ef2bca6135951fdf7bb226, UNASSIGN in 215 msec 2024-11-16T06:27:19,798 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=171 2024-11-16T06:27:19,798 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=173, ppid=171, state=SUCCESS, hasLock=false; CloseRegionProcedure 7a242737ed7817de1178937e7926f1a3, server=3456ee6a3164,37533,1731738278939 in 205 msec 2024-11-16T06:27:19,800 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=171, resume processing ppid=169 2024-11-16T06:27:19,800 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=7a242737ed7817de1178937e7926f1a3, UNASSIGN in 218 msec 2024-11-16T06:27:19,804 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=169, resume processing ppid=168 2024-11-16T06:27:19,804 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=169, ppid=168, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 223 msec 2024-11-16T06:27:19,806 DEBUG 
[PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738439806"}]},"ts":"1731738439806"} 2024-11-16T06:27:19,809 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-11-16T06:27:19,809 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-11-16T06:27:19,813 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 241 msec 2024-11-16T06:27:19,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-16T06:27:19,895 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-16T06:27:19,895 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-11-16T06:27:19,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=174, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-16T06:27:19,897 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=174, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-16T06:27:19,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-11-16T06:27:19,899 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=174, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-16T06:27:19,902 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-11-16T06:27:19,904 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/7a242737ed7817de1178937e7926f1a3 2024-11-16T06:27:19,904 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/dc70c908a8ef2bca6135951fdf7bb226 2024-11-16T06:27:19,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-16T06:27:19,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, 
path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-16T06:27:19,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-16T06:27:19,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-16T06:27:19,907 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/7a242737ed7817de1178937e7926f1a3/cf, FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/7a242737ed7817de1178937e7926f1a3/recovered.edits] 2024-11-16T06:27:19,908 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-16T06:27:19,908 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-16T06:27:19,908 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-16T06:27:19,909 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/dc70c908a8ef2bca6135951fdf7bb226/cf, FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/dc70c908a8ef2bca6135951fdf7bb226/recovered.edits] 2024-11-16T06:27:19,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-16T06:27:19,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-16T06:27:19,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:27:19,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:27:19,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:27:19,911 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-16T06:27:19,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:27:19,911 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data null 2024-11-16T06:27:19,911 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-16T06:27:19,914 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/dc70c908a8ef2bca6135951fdf7bb226/cf/a58248c86ebe4da9943ca0fbbc711976 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testEmptyExportFileSystemState/dc70c908a8ef2bca6135951fdf7bb226/cf/a58248c86ebe4da9943ca0fbbc711976 2024-11-16T06:27:19,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-16T06:27:19,914 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:27:19,914 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:27:19,915 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:27:19,915 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:27:19,915 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/7a242737ed7817de1178937e7926f1a3/cf/fa46c184c20b4418896bcd2d0009c0c8 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testEmptyExportFileSystemState/7a242737ed7817de1178937e7926f1a3/cf/fa46c184c20b4418896bcd2d0009c0c8 2024-11-16T06:27:19,918 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/dc70c908a8ef2bca6135951fdf7bb226/recovered.edits/9.seqid to 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testEmptyExportFileSystemState/dc70c908a8ef2bca6135951fdf7bb226/recovered.edits/9.seqid 2024-11-16T06:27:19,918 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/dc70c908a8ef2bca6135951fdf7bb226 2024-11-16T06:27:19,919 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/7a242737ed7817de1178937e7926f1a3/recovered.edits/9.seqid to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testEmptyExportFileSystemState/7a242737ed7817de1178937e7926f1a3/recovered.edits/9.seqid 2024-11-16T06:27:19,919 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testEmptyExportFileSystemState/7a242737ed7817de1178937e7926f1a3 2024-11-16T06:27:19,919 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-11-16T06:27:19,920 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-11-16T06:27:19,921 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf] 2024-11-16T06:27:19,925 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241116f62dad01f18741e39194d176c3f5cdf0_7a242737ed7817de1178937e7926f1a3 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241116f62dad01f18741e39194d176c3f5cdf0_7a242737ed7817de1178937e7926f1a3 2024-11-16T06:27:19,926 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e2024111638971a6256d84190b324eba4c45dbf68_dc70c908a8ef2bca6135951fdf7bb226 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e2024111638971a6256d84190b324eba4c45dbf68_dc70c908a8ef2bca6135951fdf7bb226 2024-11-16T06:27:19,927 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-11-16T06:27:19,930 DEBUG [PEWorker-3 {}] 
procedure.DeleteTableProcedure(120): Deleting regions from META for pid=174, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-16T06:27:19,932 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-11-16T06:27:19,935 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-11-16T06:27:19,937 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=174, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-16T06:27:19,937 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testEmptyExportFileSystemState' from region states. 2024-11-16T06:27:19,937 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731738439937"}]},"ts":"9223372036854775807"} 2024-11-16T06:27:19,937 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731738439937"}]},"ts":"9223372036854775807"} 2024-11-16T06:27:19,940 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-16T06:27:19,940 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => dc70c908a8ef2bca6135951fdf7bb226, NAME => 'testtb-testEmptyExportFileSystemState,,1731738427724.dc70c908a8ef2bca6135951fdf7bb226.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 7a242737ed7817de1178937e7926f1a3, NAME => 'testtb-testEmptyExportFileSystemState,1,1731738427724.7a242737ed7817de1178937e7926f1a3.', STARTKEY => '1', ENDKEY => ''}] 2024-11-16T06:27:19,940 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 
2024-11-16T06:27:19,940 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731738439940"}]},"ts":"9223372036854775807"} 2024-11-16T06:27:19,945 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-11-16T06:27:19,946 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=174, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-16T06:27:19,947 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 51 msec 2024-11-16T06:27:20,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-16T06:27:20,025 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-11-16T06:27:20,025 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-16T06:27:20,032 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-16T06:27:20,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-11-16T06:27:20,037 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-16T06:27:20,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-11-16T06:27:20,075 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=807 (was 793) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1678526367_1 at /127.0.0.1:48976 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1678526367_1 at /127.0.0.1:56390 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5734 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Client (692179358) connection to localhost/127.0.0.1:37535 from 
jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: process reaper (pid 128660) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37535 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2111949037_22 at /127.0.0.1:49880 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (692179358) connection to localhost/127.0.0.1:36617 from appattempt_1731738285782_0007_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2111949037_22 at /127.0.0.1:35590 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2111949037_22 at /127.0.0.1:37214 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=817 (was 791) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=447 (was 461), ProcessCount=15 (was 16), AvailableMemoryMB=2024 (was 980) - AvailableMemoryMB LEAK? 
- 2024-11-16T06:27:20,075 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=807 is superior to 500 2024-11-16T06:27:20,096 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=807, OpenFileDescriptor=817, MaxFileDescriptor=1048576, SystemLoadAverage=447, ProcessCount=15, AvailableMemoryMB=2023 2024-11-16T06:27:20,096 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=807 is superior to 500 2024-11-16T06:27:20,098 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T06:27:20,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=175, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum 2024-11-16T06:27:20,101 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T06:27:20,101 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 175 2024-11-16T06:27:20,103 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T06:27:20,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-16T06:27:20,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742254_1430 (size=440) 2024-11-16T06:27:20,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742254_1430 (size=440) 2024-11-16T06:27:20,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742254_1430 (size=440) 2024-11-16T06:27:20,122 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 558bc9b497211de804309b98bfd4a1bc, NAME => 'testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY 
=> 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:27:20,122 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5da91dc6c7f65b5b577c46271ab8bfb7, NAME => 'testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:27:20,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742255_1431 (size=65) 2024-11-16T06:27:20,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742255_1431 (size=65) 2024-11-16T06:27:20,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742255_1431 (size=65) 2024-11-16T06:27:20,176 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:27:20,176 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing 558bc9b497211de804309b98bfd4a1bc, disabling compactions & flushes 2024-11-16T06:27:20,176 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc. 2024-11-16T06:27:20,176 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc. 2024-11-16T06:27:20,176 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc. after waiting 0 ms 2024-11-16T06:27:20,176 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc. 2024-11-16T06:27:20,176 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc. 
2024-11-16T06:27:20,176 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for 558bc9b497211de804309b98bfd4a1bc: Waiting for close lock at 1731738440176Disabling compacts and flushes for region at 1731738440176Disabling writes for close at 1731738440176Writing region close event to WAL at 1731738440176Closed at 1731738440176 2024-11-16T06:27:20,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742256_1432 (size=65) 2024-11-16T06:27:20,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742256_1432 (size=65) 2024-11-16T06:27:20,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742256_1432 (size=65) 2024-11-16T06:27:20,187 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:27:20,187 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing 5da91dc6c7f65b5b577c46271ab8bfb7, disabling compactions & flushes 2024-11-16T06:27:20,187 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7. 2024-11-16T06:27:20,187 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7. 2024-11-16T06:27:20,187 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7. after waiting 0 ms 2024-11-16T06:27:20,187 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7. 2024-11-16T06:27:20,187 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7. 
2024-11-16T06:27:20,187 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5da91dc6c7f65b5b577c46271ab8bfb7: Waiting for close lock at 1731738440187Disabling compacts and flushes for region at 1731738440187Disabling writes for close at 1731738440187Writing region close event to WAL at 1731738440187Closed at 1731738440187 2024-11-16T06:27:20,188 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T06:27:20,189 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731738440189"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731738440189"}]},"ts":"1731738440189"} 2024-11-16T06:27:20,189 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731738440189"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731738440189"}]},"ts":"1731738440189"} 2024-11-16T06:27:20,191 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-16T06:27:20,193 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T06:27:20,194 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738440194"}]},"ts":"1731738440194"} 2024-11-16T06:27:20,196 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-11-16T06:27:20,196 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {3456ee6a3164=0} racks are {/default-rack=0} 2024-11-16T06:27:20,197 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-16T06:27:20,197 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-16T06:27:20,197 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-16T06:27:20,197 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-16T06:27:20,197 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-16T06:27:20,197 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-16T06:27:20,197 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-16T06:27:20,197 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-16T06:27:20,197 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-16T06:27:20,197 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-16T06:27:20,197 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5da91dc6c7f65b5b577c46271ab8bfb7, ASSIGN}, {pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=558bc9b497211de804309b98bfd4a1bc, ASSIGN}] 2024-11-16T06:27:20,199 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=558bc9b497211de804309b98bfd4a1bc, ASSIGN 2024-11-16T06:27:20,199 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5da91dc6c7f65b5b577c46271ab8bfb7, ASSIGN 2024-11-16T06:27:20,200 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=558bc9b497211de804309b98bfd4a1bc, ASSIGN; state=OFFLINE, location=3456ee6a3164,37533,1731738278939; forceNewPlan=false, retain=false 2024-11-16T06:27:20,201 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5da91dc6c7f65b5b577c46271ab8bfb7, ASSIGN; state=OFFLINE, location=3456ee6a3164,35939,1731738278639; forceNewPlan=false, retain=false 2024-11-16T06:27:20,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-16T06:27:20,351 INFO [3456ee6a3164:39381 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
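For orientation: the create-table procedure traced above (pid=175, two regions split at row key '1', a single 'cf' family) is the master-side half of an ordinary Admin.createTable call. Below is a minimal client-side sketch, assuming the HBase 2.x Java API; the class name is a placeholder, only the table name, family name, and split key are taken from the log, and the MOB/ACL settings visible later in the log are omitted.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateChecksumTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithChecksum");
      // One 'cf' family with a single version, matching the column family
      // descriptor printed in the region open journal above.
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build());
      // A single split key of '1' yields the two regions seen in the log:
      // ('', '1') and ('1', '').
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(td.build(), splitKeys);
    }
  }
}
```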
2024-11-16T06:27:20,352 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=176 updating hbase:meta row=5da91dc6c7f65b5b577c46271ab8bfb7, regionState=OPENING, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:27:20,352 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=177 updating hbase:meta row=558bc9b497211de804309b98bfd4a1bc, regionState=OPENING, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:27:20,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=558bc9b497211de804309b98bfd4a1bc, ASSIGN because future has completed 2024-11-16T06:27:20,355 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE, hasLock=false; OpenRegionProcedure 558bc9b497211de804309b98bfd4a1bc, server=3456ee6a3164,37533,1731738278939}] 2024-11-16T06:27:20,356 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5da91dc6c7f65b5b577c46271ab8bfb7, ASSIGN because future has completed 2024-11-16T06:27:20,357 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=179, ppid=176, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5da91dc6c7f65b5b577c46271ab8bfb7, server=3456ee6a3164,35939,1731738278639}] 2024-11-16T06:27:20,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-16T06:27:20,511 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc. 2024-11-16T06:27:20,511 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7. 2024-11-16T06:27:20,511 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(7752): Opening region: {ENCODED => 5da91dc6c7f65b5b577c46271ab8bfb7, NAME => 'testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7.', STARTKEY => '', ENDKEY => '1'} 2024-11-16T06:27:20,511 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(7752): Opening region: {ENCODED => 558bc9b497211de804309b98bfd4a1bc, NAME => 'testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc.', STARTKEY => '1', ENDKEY => ''} 2024-11-16T06:27:20,511 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7. service=AccessControlService 2024-11-16T06:27:20,511 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc. 
service=AccessControlService 2024-11-16T06:27:20,512 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-16T06:27:20,512 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-16T06:27:20,512 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 5da91dc6c7f65b5b577c46271ab8bfb7 2024-11-16T06:27:20,512 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 558bc9b497211de804309b98bfd4a1bc 2024-11-16T06:27:20,512 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:27:20,512 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:27:20,512 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(7794): checking encryption for 5da91dc6c7f65b5b577c46271ab8bfb7 2024-11-16T06:27:20,512 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(7797): checking classloading for 5da91dc6c7f65b5b577c46271ab8bfb7 2024-11-16T06:27:20,512 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(7794): checking encryption for 558bc9b497211de804309b98bfd4a1bc 2024-11-16T06:27:20,512 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(7797): checking classloading for 558bc9b497211de804309b98bfd4a1bc 2024-11-16T06:27:20,513 INFO [StoreOpener-558bc9b497211de804309b98bfd4a1bc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 558bc9b497211de804309b98bfd4a1bc 2024-11-16T06:27:20,513 INFO [StoreOpener-5da91dc6c7f65b5b577c46271ab8bfb7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5da91dc6c7f65b5b577c46271ab8bfb7 2024-11-16T06:27:20,514 INFO [StoreOpener-5da91dc6c7f65b5b577c46271ab8bfb7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5da91dc6c7f65b5b577c46271ab8bfb7 columnFamilyName cf 2024-11-16T06:27:20,514 INFO [StoreOpener-558bc9b497211de804309b98bfd4a1bc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 558bc9b497211de804309b98bfd4a1bc columnFamilyName cf 2024-11-16T06:27:20,515 DEBUG [StoreOpener-558bc9b497211de804309b98bfd4a1bc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:27:20,515 DEBUG [StoreOpener-5da91dc6c7f65b5b577c46271ab8bfb7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:27:20,516 INFO [StoreOpener-5da91dc6c7f65b5b577c46271ab8bfb7-1 {}] regionserver.HStore(327): Store=5da91dc6c7f65b5b577c46271ab8bfb7/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:27:20,516 INFO [StoreOpener-558bc9b497211de804309b98bfd4a1bc-1 {}] regionserver.HStore(327): Store=558bc9b497211de804309b98bfd4a1bc/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:27:20,516 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1038): replaying wal for 5da91dc6c7f65b5b577c46271ab8bfb7 2024-11-16T06:27:20,516 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1038): replaying wal for 558bc9b497211de804309b98bfd4a1bc 2024-11-16T06:27:20,517 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/5da91dc6c7f65b5b577c46271ab8bfb7 2024-11-16T06:27:20,517 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc 2024-11-16T06:27:20,517 DEBUG 
[RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/5da91dc6c7f65b5b577c46271ab8bfb7 2024-11-16T06:27:20,517 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc 2024-11-16T06:27:20,517 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1048): stopping wal replay for 5da91dc6c7f65b5b577c46271ab8bfb7 2024-11-16T06:27:20,517 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1048): stopping wal replay for 558bc9b497211de804309b98bfd4a1bc 2024-11-16T06:27:20,517 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1060): Cleaning up temporary data for 5da91dc6c7f65b5b577c46271ab8bfb7 2024-11-16T06:27:20,517 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1060): Cleaning up temporary data for 558bc9b497211de804309b98bfd4a1bc 2024-11-16T06:27:20,519 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1093): writing seq id for 558bc9b497211de804309b98bfd4a1bc 2024-11-16T06:27:20,519 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1093): writing seq id for 5da91dc6c7f65b5b577c46271ab8bfb7 2024-11-16T06:27:20,521 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/5da91dc6c7f65b5b577c46271ab8bfb7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:27:20,521 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1114): Opened 5da91dc6c7f65b5b577c46271ab8bfb7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59112120, jitterRate=-0.11916077136993408}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:27:20,521 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5da91dc6c7f65b5b577c46271ab8bfb7 2024-11-16T06:27:20,522 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1006): Region open journal for 5da91dc6c7f65b5b577c46271ab8bfb7: Running coprocessor pre-open hook at 1731738440512Writing region info on filesystem at 1731738440512Initializing all the Stores at 1731738440513 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738440513Cleaning up temporary data from old regions at 1731738440517 (+4 ms)Running coprocessor post-open hooks at 1731738440521 (+4 ms)Region opened successfully at 1731738440522 (+1 ms) 2024-11-16T06:27:20,523 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:27:20,523 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7., pid=179, masterSystemTime=1731738440509 2024-11-16T06:27:20,524 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1114): Opened 558bc9b497211de804309b98bfd4a1bc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67300239, jitterRate=0.002851709723472595}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:27:20,524 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 558bc9b497211de804309b98bfd4a1bc 2024-11-16T06:27:20,524 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1006): Region open journal for 558bc9b497211de804309b98bfd4a1bc: Running coprocessor pre-open hook at 1731738440512Writing region info on filesystem at 1731738440512Initializing all the Stores at 1731738440513 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738440513Cleaning up temporary data from old regions at 1731738440517 (+4 ms)Running coprocessor post-open hooks at 1731738440524 (+7 ms)Region opened successfully at 1731738440524 2024-11-16T06:27:20,525 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc., pid=178, masterSystemTime=1731738440508 2024-11-16T06:27:20,525 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7. 2024-11-16T06:27:20,525 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7. 
2024-11-16T06:27:20,526 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=176 updating hbase:meta row=5da91dc6c7f65b5b577c46271ab8bfb7, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:27:20,529 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc. 2024-11-16T06:27:20,529 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc. 2024-11-16T06:27:20,529 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=179, ppid=176, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5da91dc6c7f65b5b577c46271ab8bfb7, server=3456ee6a3164,35939,1731738278639 because future has completed 2024-11-16T06:27:20,529 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=177 updating hbase:meta row=558bc9b497211de804309b98bfd4a1bc, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:27:20,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=178, ppid=177, state=RUNNABLE, hasLock=false; OpenRegionProcedure 558bc9b497211de804309b98bfd4a1bc, server=3456ee6a3164,37533,1731738278939 because future has completed 2024-11-16T06:27:20,533 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=179, resume processing ppid=176 2024-11-16T06:27:20,533 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=176, state=SUCCESS, hasLock=false; OpenRegionProcedure 5da91dc6c7f65b5b577c46271ab8bfb7, server=3456ee6a3164,35939,1731738278639 in 173 msec 2024-11-16T06:27:20,535 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=178, resume processing ppid=177 2024-11-16T06:27:20,535 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=175, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5da91dc6c7f65b5b577c46271ab8bfb7, ASSIGN in 336 msec 2024-11-16T06:27:20,535 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=177, state=SUCCESS, hasLock=false; OpenRegionProcedure 558bc9b497211de804309b98bfd4a1bc, server=3456ee6a3164,37533,1731738278939 in 178 msec 2024-11-16T06:27:20,537 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=177, resume processing ppid=175 2024-11-16T06:27:20,537 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, ppid=175, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=558bc9b497211de804309b98bfd4a1bc, ASSIGN in 338 msec 2024-11-16T06:27:20,538 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T06:27:20,538 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738440538"}]},"ts":"1731738440538"} 2024-11-16T06:27:20,540 INFO 
[PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-11-16T06:27:20,541 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T06:27:20,541 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-11-16T06:27:20,545 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-16T06:27:20,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:27:20,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:27:20,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:27:20,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:27:20,552 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:27:20,553 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-16T06:27:20,553 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:27:20,553 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-16T06:27:20,553 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:27:20,553 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:27:20,553 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-16T06:27:20,553 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-16T06:27:20,555 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 454 msec 2024-11-16T06:27:20,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-16T06:27:20,735 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-16T06:27:20,735 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-16T06:27:20,737 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-11-16T06:27:20,737 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7. 2024-11-16T06:27:20,738 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T06:27:20,740 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-16T06:27:20,746 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-16T06:27:20,752 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-16T06:27:20,756 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-16T06:27:20,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731738440756 (current time:1731738440756). 
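The PermissionStorage and ZKPermissionWatcher entries above record the creator ("jenkins") receiving RWXCA on the new table; in this run that grant comes from the table-create post operation rather than an explicit client call. For illustration only, an equivalent explicit grant via the AccessControlClient API might look like the sketch below (the class name is a placeholder; the user, table, and actions are taken from the log).

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermissions {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Grants 'jenkins' RWXCA on the table; PermissionStorage persists the
      // entry in hbase:acl and ZKPermissionWatcher fans it out to each server,
      // producing the NodeChildrenChanged events seen above.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportWithChecksum"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}
```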
2024-11-16T06:27:20,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-16T06:27:20,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-16T06:27:20,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-16T06:27:20,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60dde18c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:27:20,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:27:20,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:27:20,758 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:27:20,758 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:27:20,758 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:27:20,758 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79d665d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:27:20,759 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:27:20,759 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:27:20,759 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:27:20,760 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34582, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:27:20,761 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f7ccb3c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:27:20,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:27:20,762 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:27:20,762 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:27:20,763 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56600, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:27:20,765 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381. 2024-11-16T06:27:20,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:27:20,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:27:20,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:27:20,765 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
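The snapshot request logged at 06:27:20,756 above ({ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }) is what the master receives when a client asks for a flush-type snapshot of an enabled table. A hedged client-side sketch, assuming the HBase 2.x Admin API and reusing the snapshot and table names from the log (the class name is a placeholder):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeEmptySnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // For an enabled table this produces a FLUSH-type snapshot; the master
      // then drives the SnapshotProcedure states (SNAPSHOT_PREPARE,
      // SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, ...) that appear in the log below.
      admin.snapshot("emptySnaptb0-testExportWithChecksum",
          TableName.valueOf("testtb-testExportWithChecksum"));
    }
  }
}
```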
2024-11-16T06:27:20,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72d550fa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:27:20,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:27:20,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:27:20,767 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:27:20,767 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:27:20,767 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:27:20,768 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63e6e304, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:27:20,768 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:27:20,768 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:27:20,768 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:27:20,769 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34610, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:27:20,770 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e26b1a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:27:20,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:27:20,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:27:20,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:27:20,772 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56608, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-16T06:27:20,774 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338., hostname=3456ee6a3164,35939,1731738278639, seqNum=2] 2024-11-16T06:27:20,775 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:27:20,775 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53274, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:27:20,776 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381. 2024-11-16T06:27:20,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:27:20,777 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:27:20,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:27:20,777 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:27:20,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-16T06:27:20,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-16T06:27:20,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-16T06:27:20,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 180 2024-11-16T06:27:20,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-16T06:27:20,780 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-16T06:27:20,781 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-16T06:27:20,784 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-16T06:27:20,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742257_1433 (size=161) 2024-11-16T06:27:20,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742257_1433 (size=161) 2024-11-16T06:27:20,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742257_1433 (size=161) 2024-11-16T06:27:20,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-16T06:27:21,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-16T06:27:21,215 INFO [PEWorker-2 {}] 
procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-16T06:27:21,215 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5da91dc6c7f65b5b577c46271ab8bfb7}, {pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 558bc9b497211de804309b98bfd4a1bc}] 2024-11-16T06:27:21,216 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5da91dc6c7f65b5b577c46271ab8bfb7 2024-11-16T06:27:21,216 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 558bc9b497211de804309b98bfd4a1bc 2024-11-16T06:27:21,368 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=181 2024-11-16T06:27:21,368 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=182 2024-11-16T06:27:21,369 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7. 2024-11-16T06:27:21,369 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.HRegion(2603): Flush status journal for 5da91dc6c7f65b5b577c46271ab8bfb7: 2024-11-16T06:27:21,369 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7. for emptySnaptb0-testExportWithChecksum completed. 2024-11-16T06:27:21,369 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-16T06:27:21,369 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:27:21,369 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-16T06:27:21,369 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc. 
2024-11-16T06:27:21,369 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.HRegion(2603): Flush status journal for 558bc9b497211de804309b98bfd4a1bc: 2024-11-16T06:27:21,370 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc. for emptySnaptb0-testExportWithChecksum completed. 2024-11-16T06:27:21,370 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-16T06:27:21,370 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:27:21,370 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-16T06:27:21,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-16T06:27:21,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742258_1434 (size=68) 2024-11-16T06:27:21,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742258_1434 (size=68) 2024-11-16T06:27:21,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742258_1434 (size=68) 2024-11-16T06:27:21,412 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc. 
2024-11-16T06:27:21,412 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=182 2024-11-16T06:27:21,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=182 2024-11-16T06:27:21,413 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 558bc9b497211de804309b98bfd4a1bc 2024-11-16T06:27:21,413 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 558bc9b497211de804309b98bfd4a1bc 2024-11-16T06:27:21,417 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 558bc9b497211de804309b98bfd4a1bc in 200 msec 2024-11-16T06:27:21,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742259_1435 (size=68) 2024-11-16T06:27:21,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742259_1435 (size=68) 2024-11-16T06:27:21,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742259_1435 (size=68) 2024-11-16T06:27:21,430 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7. 
2024-11-16T06:27:21,431 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=181 2024-11-16T06:27:21,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=181 2024-11-16T06:27:21,432 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 5da91dc6c7f65b5b577c46271ab8bfb7 2024-11-16T06:27:21,432 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5da91dc6c7f65b5b577c46271ab8bfb7 2024-11-16T06:27:21,445 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=181, resume processing ppid=180 2024-11-16T06:27:21,446 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5da91dc6c7f65b5b577c46271ab8bfb7 in 221 msec 2024-11-16T06:27:21,446 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-16T06:27:21,452 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-16T06:27:21,454 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-16T06:27:21,455 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-16T06:27:21,455 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:27:21,461 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-16T06:27:21,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742260_1436 (size=60) 2024-11-16T06:27:21,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742260_1436 (size=60) 2024-11-16T06:27:21,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742260_1436 (size=60) 2024-11-16T06:27:21,506 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-16T06:27:21,506 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-11-16T06:27:21,507 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-11-16T06:27:21,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742261_1437 (size=641) 2024-11-16T06:27:21,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742261_1437 (size=641) 2024-11-16T06:27:21,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742261_1437 (size=641) 2024-11-16T06:27:21,567 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-16T06:27:21,590 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-16T06:27:21,592 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-11-16T06:27:21,596 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_POST_OPERATION, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-16T06:27:21,596 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 180 2024-11-16T06:27:21,599 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 818 msec 2024-11-16T06:27:21,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-16T06:27:21,915 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-16T06:27:21,922 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35939 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7. with WAL disabled. Data may be lost in the event of a crash. 2024-11-16T06:27:21,924 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37533 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc. with WAL disabled. Data may be lost in the event of a crash. 2024-11-16T06:27:21,926 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-16T06:27:21,929 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-11-16T06:27:21,929 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7. 
2024-11-16T06:27:21,929 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T06:27:21,932 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-16T06:27:21,938 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-16T06:27:21,945 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-16T06:27:21,948 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-16T06:27:21,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731738441948 (current time:1731738441948). 2024-11-16T06:27:21,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-16T06:27:21,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-16T06:27:21,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-16T06:27:21,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5469289c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:27:21,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:27:21,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:27:21,950 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:27:21,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:27:21,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:27:21,951 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@215e5667, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-11-16T06:27:21,951 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:27:21,951 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:27:21,951 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:27:21,952 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34626, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:27:21,953 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a954430, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:27:21,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:27:21,954 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:27:21,954 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:27:21,955 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56624, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:27:21,956 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381. 
2024-11-16T06:27:21,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack:
  at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
  at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
  at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481)
  at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353)
  at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
  at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
  at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
  at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
  at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
  at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-16T06:27:21,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-16T06:27:21,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-16T06:27:21,957 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-16T06:27:21,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bc535e1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-16T06:27:21,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id
2024-11-16T06:27:21,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-11-16T06:27:21,958 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f'
2024-11-16T06:27:21,958 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-11-16T06:27:21,958 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f"
2024-11-16T06:27:21,959 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6231aa8e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-16T06:27:21,959 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to
use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:27:21,959 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:27:21,959 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:27:21,960 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34646, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:27:21,961 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64a14504, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:27:21,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:27:21,962 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:27:21,962 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:27:21,963 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56640, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:27:21,965 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338., hostname=3456ee6a3164,35939,1731738278639, seqNum=2] 2024-11-16T06:27:21,966 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:27:21,967 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53284, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:27:21,968 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381. 
2024-11-16T06:27:21,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack:
  at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
  at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
  at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
  at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
  at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
  at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
  at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
  at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
  at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
  at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
  at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
  at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source)
  at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
  at java.base/java.lang.reflect.Method.invoke(Method.java:568)
  at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
  at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
  at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
  at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
  at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
  at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
  at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
  at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
  at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
  at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-16T06:27:21,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-16T06:27:21,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-16T06:27:21,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA]
2024-11-16T06:27:21,969 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-16T06:27:21,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot...
2024-11-16T06:27:21,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=183, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-16T06:27:21,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 183 2024-11-16T06:27:21,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-16T06:27:21,972 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-16T06:27:21,973 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-16T06:27:21,975 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-16T06:27:21,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742262_1438 (size=156) 2024-11-16T06:27:21,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742262_1438 (size=156) 2024-11-16T06:27:21,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742262_1438 (size=156) 2024-11-16T06:27:21,990 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-16T06:27:21,990 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5da91dc6c7f65b5b577c46271ab8bfb7}, {pid=185, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 558bc9b497211de804309b98bfd4a1bc}] 2024-11-16T06:27:21,992 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=184, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5da91dc6c7f65b5b577c46271ab8bfb7 2024-11-16T06:27:21,992 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=185, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 558bc9b497211de804309b98bfd4a1bc 2024-11-16T06:27:22,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-16T06:27:22,144 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=185 2024-11-16T06:27:22,144 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=184 2024-11-16T06:27:22,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc. 2024-11-16T06:27:22,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7. 2024-11-16T06:27:22,145 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegion(2902): Flushing 5da91dc6c7f65b5b577c46271ab8bfb7 1/1 column families, dataSize=333 B heapSize=976 B 2024-11-16T06:27:22,145 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegion(2902): Flushing 558bc9b497211de804309b98bfd4a1bc 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-11-16T06:27:22,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411161fdcecca1d9d4c28b4f34e220f8234be_558bc9b497211de804309b98bfd4a1bc is 71, key is 142fb3d11da45eeefce8eaa378368171/cf:q/1731738441924/Put/seqid=0 2024-11-16T06:27:22,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241116317b1b81e9994119b86f1379270821a1_5da91dc6c7f65b5b577c46271ab8bfb7 is 71, key is 0602d411f0bed11bc231b1d339629af9/cf:q/1731738441922/Put/seqid=0 2024-11-16T06:27:22,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742264_1440 (size=5242) 2024-11-16T06:27:22,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742264_1440 (size=5242) 2024-11-16T06:27:22,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742264_1440 (size=5242) 2024-11-16T06:27:22,190 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:27:22,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742263_1439 (size=8031) 2024-11-16T06:27:22,193 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742263_1439 (size=8031) 2024-11-16T06:27:22,194 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:27:22,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742263_1439 (size=8031) 2024-11-16T06:27:22,197 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241116317b1b81e9994119b86f1379270821a1_5da91dc6c7f65b5b577c46271ab8bfb7 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241116317b1b81e9994119b86f1379270821a1_5da91dc6c7f65b5b577c46271ab8bfb7 2024-11-16T06:27:22,199 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/5da91dc6c7f65b5b577c46271ab8bfb7/.tmp/cf/1a85f4e371b342ba88e4820303b9f3bc, store: [table=testtb-testExportWithChecksum family=cf region=5da91dc6c7f65b5b577c46271ab8bfb7] 2024-11-16T06:27:22,200 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/5da91dc6c7f65b5b577c46271ab8bfb7/.tmp/cf/1a85f4e371b342ba88e4820303b9f3bc is 206, key is 0202cb819298576df6df11d5031cb0507/cf:q/1731738441922/Put/seqid=0 2024-11-16T06:27:22,219 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411161fdcecca1d9d4c28b4f34e220f8234be_558bc9b497211de804309b98bfd4a1bc to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b202411161fdcecca1d9d4c28b4f34e220f8234be_558bc9b497211de804309b98bfd4a1bc 2024-11-16T06:27:22,221 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc/.tmp/cf/b1c58290c3b64cc5bed1585f12723457, store: [table=testtb-testExportWithChecksum family=cf region=558bc9b497211de804309b98bfd4a1bc] 2024-11-16T06:27:22,221 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc/.tmp/cf/b1c58290c3b64cc5bed1585f12723457 is 206, key is 1c69292a0df15ad82d3ae423aa2501d80/cf:q/1731738441924/Put/seqid=0 2024-11-16T06:27:22,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742266_1442 (size=14449) 2024-11-16T06:27:22,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742266_1442 (size=14449) 2024-11-16T06:27:22,246 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc/.tmp/cf/b1c58290c3b64cc5bed1585f12723457 2024-11-16T06:27:22,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742266_1442 (size=14449) 2024-11-16T06:27:22,252 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc/.tmp/cf/b1c58290c3b64cc5bed1585f12723457 as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc/cf/b1c58290c3b64cc5bed1585f12723457 2024-11-16T06:27:22,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742265_1441 (size=6310) 2024-11-16T06:27:22,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742265_1441 (size=6310) 2024-11-16T06:27:22,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742265_1441 (size=6310) 2024-11-16T06:27:22,257 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=333, hasBloomFilter=true, into tmp file hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/5da91dc6c7f65b5b577c46271ab8bfb7/.tmp/cf/1a85f4e371b342ba88e4820303b9f3bc 2024-11-16T06:27:22,261 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc/cf/b1c58290c3b64cc5bed1585f12723457, entries=45, sequenceid=6, filesize=14.1 K 2024-11-16T06:27:22,262 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 558bc9b497211de804309b98bfd4a1bc in 117ms, sequenceid=6, compaction requested=false 
2024-11-16T06:27:22,262 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-11-16T06:27:22,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/5da91dc6c7f65b5b577c46271ab8bfb7/.tmp/cf/1a85f4e371b342ba88e4820303b9f3bc as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/5da91dc6c7f65b5b577c46271ab8bfb7/cf/1a85f4e371b342ba88e4820303b9f3bc 2024-11-16T06:27:22,269 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegion(2603): Flush status journal for 558bc9b497211de804309b98bfd4a1bc: 2024-11-16T06:27:22,269 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc. for snaptb0-testExportWithChecksum completed. 2024-11-16T06:27:22,269 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-16T06:27:22,269 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:27:22,269 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc/cf/b1c58290c3b64cc5bed1585f12723457] hfiles 2024-11-16T06:27:22,269 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc/cf/b1c58290c3b64cc5bed1585f12723457 for snapshot=snaptb0-testExportWithChecksum 2024-11-16T06:27:22,272 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/5da91dc6c7f65b5b577c46271ab8bfb7/cf/1a85f4e371b342ba88e4820303b9f3bc, entries=5, sequenceid=6, filesize=6.2 K 2024-11-16T06:27:22,274 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 5da91dc6c7f65b5b577c46271ab8bfb7 in 129ms, sequenceid=6, compaction requested=false 2024-11-16T06:27:22,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegion(2603): 
Flush status journal for 5da91dc6c7f65b5b577c46271ab8bfb7: 2024-11-16T06:27:22,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7. for snaptb0-testExportWithChecksum completed. 2024-11-16T06:27:22,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-16T06:27:22,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:27:22,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/5da91dc6c7f65b5b577c46271ab8bfb7/cf/1a85f4e371b342ba88e4820303b9f3bc] hfiles 2024-11-16T06:27:22,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/5da91dc6c7f65b5b577c46271ab8bfb7/cf/1a85f4e371b342ba88e4820303b9f3bc for snapshot=snaptb0-testExportWithChecksum 2024-11-16T06:27:22,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-16T06:27:22,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742267_1443 (size=107) 2024-11-16T06:27:22,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742267_1443 (size=107) 2024-11-16T06:27:22,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742267_1443 (size=107) 2024-11-16T06:27:22,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7. 
2024-11-16T06:27:22,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=184 2024-11-16T06:27:22,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=184 2024-11-16T06:27:22,308 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 5da91dc6c7f65b5b577c46271ab8bfb7 2024-11-16T06:27:22,308 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=184, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5da91dc6c7f65b5b577c46271ab8bfb7 2024-11-16T06:27:22,311 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=183, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5da91dc6c7f65b5b577c46271ab8bfb7 in 319 msec 2024-11-16T06:27:22,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742268_1444 (size=107) 2024-11-16T06:27:22,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742268_1444 (size=107) 2024-11-16T06:27:22,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742268_1444 (size=107) 2024-11-16T06:27:22,333 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc. 
2024-11-16T06:27:22,333 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=185 2024-11-16T06:27:22,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=185 2024-11-16T06:27:22,334 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 558bc9b497211de804309b98bfd4a1bc 2024-11-16T06:27:22,334 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=185, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 558bc9b497211de804309b98bfd4a1bc 2024-11-16T06:27:22,338 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=185, resume processing ppid=183 2024-11-16T06:27:22,338 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, ppid=183, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 558bc9b497211de804309b98bfd4a1bc in 346 msec 2024-11-16T06:27:22,338 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-16T06:27:22,340 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-16T06:27:22,341 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-16T06:27:22,341 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-16T06:27:22,342 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:27:22,343 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b202411161fdcecca1d9d4c28b4f34e220f8234be_558bc9b497211de804309b98bfd4a1bc, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241116317b1b81e9994119b86f1379270821a1_5da91dc6c7f65b5b577c46271ab8bfb7] hfiles 2024-11-16T06:27:22,343 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b202411161fdcecca1d9d4c28b4f34e220f8234be_558bc9b497211de804309b98bfd4a1bc 2024-11-16T06:27:22,343 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241116317b1b81e9994119b86f1379270821a1_5da91dc6c7f65b5b577c46271ab8bfb7 2024-11-16T06:27:22,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742269_1445 (size=291) 2024-11-16T06:27:22,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742269_1445 (size=291) 2024-11-16T06:27:22,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742269_1445 (size=291) 2024-11-16T06:27:22,385 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-16T06:27:22,385 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-11-16T06:27:22,386 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-16T06:27:22,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742270_1446 (size=951) 2024-11-16T06:27:22,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742270_1446 (size=951) 2024-11-16T06:27:22,410 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742270_1446 (size=951) 2024-11-16T06:27:22,423 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-16T06:27:22,433 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-16T06:27:22,433 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-16T06:27:22,440 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-16T06:27:22,440 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 183 2024-11-16T06:27:22,442 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 471 msec 2024-11-16T06:27:22,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-16T06:27:22,595 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-16T06:27:22,595 INFO [Time-limited test {}] snapshot.TestExportSnapshot(475): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738442595 2024-11-16T06:27:22,596 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738442595, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738442595, srcFsUri=hdfs://localhost:36767, srcDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:27:22,637 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:36767, inputRoot=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 
2024-11-16T06:27:22,637 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=org.apache.hadoop.fs.LocalFileSystem@30736072, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738442595, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738442595/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-16T06:27:22,639 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-16T06:27:22,647 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738442595/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-16T06:27:22,687 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:22,687 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:22,687 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:23,793 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop-6749324370715331090.jar 2024-11-16T06:27:23,793 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:23,793 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:23,862 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop-12783890465242742429.jar 2024-11-16T06:27:23,862 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:23,862 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:23,862 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:23,862 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:23,863 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:23,863 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:23,863 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-16T06:27:23,863 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-16T06:27:23,863 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-16T06:27:23,863 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-16T06:27:23,864 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-16T06:27:23,864 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-16T06:27:23,864 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-16T06:27:23,864 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-16T06:27:23,864 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-16T06:27:23,865 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-16T06:27:23,865 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-16T06:27:23,865 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:27:23,865 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:27:23,865 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:27:23,866 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:27:23,866 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:27:23,866 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:27:23,866 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:27:23,961 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742271_1447 (size=131440) 2024-11-16T06:27:23,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742271_1447 (size=131440) 2024-11-16T06:27:23,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742271_1447 (size=131440) 2024-11-16T06:27:23,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742272_1448 (size=4188619) 2024-11-16T06:27:23,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742272_1448 (size=4188619) 2024-11-16T06:27:23,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742272_1448 (size=4188619) 2024-11-16T06:27:24,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742273_1449 (size=1323991) 2024-11-16T06:27:24,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742273_1449 (size=1323991) 2024-11-16T06:27:24,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742273_1449 (size=1323991) 2024-11-16T06:27:24,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742274_1450 (size=903732) 2024-11-16T06:27:24,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742274_1450 (size=903732) 2024-11-16T06:27:24,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742274_1450 (size=903732) 2024-11-16T06:27:24,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742275_1451 (size=8360083) 2024-11-16T06:27:24,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742275_1451 (size=8360083) 2024-11-16T06:27:24,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742275_1451 (size=8360083) 2024-11-16T06:27:24,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742276_1452 (size=6424738) 2024-11-16T06:27:24,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742276_1452 (size=6424738) 2024-11-16T06:27:24,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742276_1452 (size=6424738) 2024-11-16T06:27:24,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742277_1453 (size=1877034) 2024-11-16T06:27:24,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742277_1453 (size=1877034) 2024-11-16T06:27:24,149 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742277_1453 (size=1877034) 2024-11-16T06:27:24,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742278_1454 (size=77835) 2024-11-16T06:27:24,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742278_1454 (size=77835) 2024-11-16T06:27:24,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742278_1454 (size=77835) 2024-11-16T06:27:24,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742279_1455 (size=30949) 2024-11-16T06:27:24,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742279_1455 (size=30949) 2024-11-16T06:27:24,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742279_1455 (size=30949) 2024-11-16T06:27:24,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742280_1456 (size=1597327) 2024-11-16T06:27:24,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742280_1456 (size=1597327) 2024-11-16T06:27:24,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742280_1456 (size=1597327) 2024-11-16T06:27:24,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742281_1457 (size=4695811) 2024-11-16T06:27:24,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742281_1457 (size=4695811) 2024-11-16T06:27:24,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742281_1457 (size=4695811) 2024-11-16T06:27:24,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742282_1458 (size=232957) 2024-11-16T06:27:24,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742282_1458 (size=232957) 2024-11-16T06:27:24,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742282_1458 (size=232957) 2024-11-16T06:27:24,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742283_1459 (size=127628) 2024-11-16T06:27:24,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742283_1459 (size=127628) 2024-11-16T06:27:24,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742283_1459 (size=127628) 2024-11-16T06:27:24,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742284_1460 (size=20406) 2024-11-16T06:27:24,399 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742284_1460 (size=20406) 2024-11-16T06:27:24,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742284_1460 (size=20406) 2024-11-16T06:27:24,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742285_1461 (size=5175431) 2024-11-16T06:27:24,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742285_1461 (size=5175431) 2024-11-16T06:27:24,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742285_1461 (size=5175431) 2024-11-16T06:27:24,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742286_1462 (size=217634) 2024-11-16T06:27:24,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742286_1462 (size=217634) 2024-11-16T06:27:24,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742286_1462 (size=217634) 2024-11-16T06:27:24,565 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0007_000001 (auth:SIMPLE) from 127.0.0.1:48726 2024-11-16T06:27:24,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742287_1463 (size=1832290) 2024-11-16T06:27:24,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742287_1463 (size=1832290) 2024-11-16T06:27:24,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742287_1463 (size=1832290) 2024-11-16T06:27:24,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742288_1464 (size=322274) 2024-11-16T06:27:24,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742288_1464 (size=322274) 2024-11-16T06:27:24,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742288_1464 (size=322274) 2024-11-16T06:27:24,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742289_1465 (size=440656) 2024-11-16T06:27:24,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742289_1465 (size=440656) 2024-11-16T06:27:24,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742289_1465 (size=440656) 2024-11-16T06:27:24,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742290_1466 (size=503880) 2024-11-16T06:27:24,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742290_1466 (size=503880) 
2024-11-16T06:27:24,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742290_1466 (size=503880) 2024-11-16T06:27:24,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742291_1467 (size=29229) 2024-11-16T06:27:24,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742291_1467 (size=29229) 2024-11-16T06:27:24,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742291_1467 (size=29229) 2024-11-16T06:27:24,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742292_1468 (size=24096) 2024-11-16T06:27:24,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742292_1468 (size=24096) 2024-11-16T06:27:24,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742292_1468 (size=24096) 2024-11-16T06:27:24,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742293_1469 (size=111872) 2024-11-16T06:27:24,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742293_1469 (size=111872) 2024-11-16T06:27:24,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742293_1469 (size=111872) 2024-11-16T06:27:24,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742294_1470 (size=45609) 2024-11-16T06:27:24,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742294_1470 (size=45609) 2024-11-16T06:27:24,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742294_1470 (size=45609) 2024-11-16T06:27:24,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742295_1471 (size=136454) 2024-11-16T06:27:24,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742295_1471 (size=136454) 2024-11-16T06:27:24,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742295_1471 (size=136454) 2024-11-16T06:27:24,744 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
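For context on the lines above: the TableMapReduceUtil(972) "For class X, using jar Y" entries record HBase resolving, for each class the copy job needs, the jar that contains it so the jar can be shipped with the job, and the JobResourceUploader warning appears because the test never sets its own job jar. A minimal, hypothetical driver sketch of those two steps (class name and job name are assumed here, this is not the test's code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarSetup {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "dependency-jar-sketch"); // hypothetical job name

    // Setting the job jar avoids the JobResourceUploader warning
    // "No job jar file set. User classes may not be found."
    job.setJarByClass(DependencyJarSetup.class);

    // Finds the containing jar for each class the job depends on and adds it to the
    // job's classpath via the distributed cache; with DEBUG logging enabled this is
    // the mechanism behind the "For class <X>, using jar <Y>" lines in this log.
    TableMapReduceUtil.addDependencyJars(job);

    // A real driver would go on to configure mapper/reducer and submit the job.
  }
}

In this run the resolved jars come from the Jenkins workspace (the hbase-*-4.0.0-alpha-1-SNAPSHOT modules) and the build's local Maven repository, which is why every resolved path sits under HBase-Flaky-Tests_master.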
2024-11-16T06:27:24,746 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list
2024-11-16T06:27:24,748 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=33.2 K
2024-11-16T06:27:24,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742296_1472 (size=714)
2024-11-16T06:27:24,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742296_1472 (size=714)
2024-11-16T06:27:24,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742296_1472 (size=714)
2024-11-16T06:27:24,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742297_1473 (size=15)
2024-11-16T06:27:24,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742297_1473 (size=15)
2024-11-16T06:27:24,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742297_1473 (size=15)
2024-11-16T06:27:24,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742298_1474 (size=303774)
2024-11-16T06:27:24,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742298_1474 (size=303774)
2024-11-16T06:27:24,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742298_1474 (size=303774)
2024-11-16T06:27:24,830 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start
2024-11-16T06:27:24,830 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start
2024-11-16T06:27:25,321 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-11-16T06:27:25,561 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0008_000001 (auth:SIMPLE) from 127.0.0.1:48732
2024-11-16T06:27:28,275 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum
2024-11-16T06:27:28,275 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer
2024-11-16T06:27:28,276 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState
2024-11-16T06:27:29,680 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_3/usercache/jenkins/appcache/application_1731738285782_0007/container_1731738285782_0007_01_000001/launch_container.sh]
2024-11-16T06:27:29,680 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_3/usercache/jenkins/appcache/application_1731738285782_0007/container_1731738285782_0007_01_000001/container_tokens]
2024-11-16T06:27:29,680 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_3/usercache/jenkins/appcache/application_1731738285782_0007/container_1731738285782_0007_01_000001/sysfs]
2024-11-16T06:27:31,639 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0008_000001 (auth:SIMPLE) from 127.0.0.1:33002
2024-11-16T06:27:31,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742299_1475 (size=349424)
2024-11-16T06:27:31,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742299_1475 (size=349424)
2024-11-16T06:27:31,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742299_1475 (size=349424)
2024-11-16T06:27:33,779 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-11-16T06:27:33,917 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0008_000001 (auth:SIMPLE) from 127.0.0.1:43886
2024-11-16T06:27:36,746 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-16T06:27:37,558 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_3/usercache/jenkins/appcache/application_1731738285782_0008/container_1731738285782_0008_01_000002/launch_container.sh]
2024-11-16T06:27:37,559 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_3/usercache/jenkins/appcache/application_1731738285782_0008/container_1731738285782_0008_01_000002/container_tokens]
2024-11-16T06:27:37,559 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_3/usercache/jenkins/appcache/application_1731738285782_0008/container_1731738285782_0008_01_000002/sysfs]
Error: java.io.IOException: Checksum mismatch between hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc/cf/b1c58290c3b64cc5bed1585f12723457 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738442595/archive/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc/cf/b1c58290c3b64cc5bed1585f12723457. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:599)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:335)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:257)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:181)
    at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145)
    at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348)
    at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172)
2024-11-16T06:27:38,727 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0008_000001 (auth:SIMPLE) from 127.0.0.1:43888
2024-11-16T06:27:41,489 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 28ca42946236efd20803c06170fc6c61, had cached 0 bytes from a total of 6088
2024-11-16T06:27:41,491 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 15023cc25d03bb61082803e08a9c5ef3, had cached 0 bytes from a total of 14463
2024-11-16T06:27:41,606 DEBUG [master/3456ee6a3164:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 15023cc25d03bb61082803e08a9c5ef3 changed from -1.0 to 0.0, refreshing cache
2024-11-16T06:27:41,606 DEBUG [master/3456ee6a3164:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 5da91dc6c7f65b5b577c46271ab8bfb7 changed from -1.0 to 0.0, refreshing cache
2024-11-16T06:27:41,607 DEBUG [master/3456ee6a3164:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 28ca42946236efd20803c06170fc6c61 changed from -1.0 to 0.0, refreshing cache
2024-11-16T06:27:41,607 DEBUG [master/3456ee6a3164:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 558bc9b497211de804309b98bfd4a1bc changed from -1.0 to 0.0, refreshing cache
2024-11-16T06:27:42,528 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_1/usercache/jenkins/appcache/application_1731738285782_0008/container_1731738285782_0008_01_000003/launch_container.sh]
2024-11-16T06:27:42,529 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_1/usercache/jenkins/appcache/application_1731738285782_0008/container_1731738285782_0008_01_000003/container_tokens]
2024-11-16T06:27:42,529 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_1/usercache/jenkins/appcache/application_1731738285782_0008/container_1731738285782_0008_01_000003/sysfs]
Error: java.io.IOException: Checksum mismatch between hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc/cf/b1c58290c3b64cc5bed1585f12723457 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738442595/archive/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc/cf/b1c58290c3b64cc5bed1585f12723457. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:599)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:335)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:257)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:181)
    at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145)
    at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348)
    at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172)
2024-11-16T06:27:43,751 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0008_000001 (auth:SIMPLE) from 127.0.0.1:41500
2024-11-16T06:27:47,415 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_0/usercache/jenkins/appcache/application_1731738285782_0008/container_1731738285782_0008_01_000004/launch_container.sh]
2024-11-16T06:27:47,415 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_0/usercache/jenkins/appcache/application_1731738285782_0008/container_1731738285782_0008_01_000004/container_tokens]
2024-11-16T06:27:47,415 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_0/usercache/jenkins/appcache/application_1731738285782_0008/container_1731738285782_0008_01_000004/sysfs]
Error: java.io.IOException: Checksum mismatch between hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc/cf/b1c58290c3b64cc5bed1585f12723457 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/local-export-1731738442595/archive/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc/cf/b1c58290c3b64cc5bed1585f12723457. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:599)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:335)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:257)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:181)
    at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145)
    at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348)
    at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172)
2024-11-16T06:27:48,757 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0008_000001 (auth:SIMPLE) from 127.0.0.1:41506
2024-11-16T06:27:51,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742300_1476 (size=21330)
2024-11-16T06:27:51,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742300_1476 (size=21330)
2024-11-16T06:27:51,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742300_1476 (size=21330)
2024-11-16T06:27:51,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742301_1477 (size=460)
2024-11-16T06:27:51,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742301_1477 (size=460)
2024-11-16T06:27:51,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742301_1477 (size=460)
2024-11-16T06:27:51,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742302_1478 (size=21330)
2024-11-16T06:27:51,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742302_1478 (size=21330)
2024-11-16T06:27:51,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742302_1478 (size=21330)
2024-11-16T06:27:51,812 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_3/usercache/jenkins/appcache/application_1731738285782_0008/container_1731738285782_0008_01_000005/launch_container.sh]
2024-11-16T06:27:51,813 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_3/usercache/jenkins/appcache/application_1731738285782_0008/container_1731738285782_0008_01_000005/container_tokens]
2024-11-16T06:27:51,813 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_3/usercache/jenkins/appcache/application_1731738285782_0008/container_1731738285782_0008_01_000005/sysfs]
2024-11-16T06:27:51,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742303_1479 (size=349424)
2024-11-16T06:27:51,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742303_1479 (size=349424)
2024-11-16T06:27:51,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742303_1479 (size=349424)
2024-11-16T06:27:51,838 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0008_000001 (auth:SIMPLE) from 127.0.0.1:56712
2024-11-16T06:27:53,072 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1230): Snapshot export failed
org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1731738285782_0008_m_000000 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:938) ~[classes/:?]
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1207) ~[classes/:?]
    at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:522) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:352) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:237) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
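The copy job's map attempts fail with the same IOException: the source file lives on hdfs://localhost:36767 while the target is a local file: path under local-export-1731738442595, and the two filesystems' checksums may not be comparable. The diagnostic itself names the two workarounds: request file-level checksum validation with dfs.checksum.combine.mode=COMPOSITE_CRC, or disable verification with -no-checksum-verify. A minimal sketch of re-running the export that way through the same ToolRunner/ExportSnapshot entry point seen in the stack trace above (the target path and flag spellings are assumed here, not taken from this run):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RerunExportWithCompositeCrc {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();

    // File-level checksum comparison, as suggested by the IOException above when the
    // source (hdfs://) and target (file:) filesystems use incompatible block checksums.
    conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");

    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum", // snapshot named in this log
        "-copy-to", "file:///tmp/local-export"         // hypothetical local target directory
        // "-no-checksum-verify"                       // alternative: skip verification (risks masking corruption)
    });
    System.exit(rc);
  }
}

Skipping verification trades safety for convenience, which is the NOTE in the diagnostic; the COMPOSITE_CRC mode keeps a checksum comparison but makes it file-level, so it can work when block sizes or filesystems differ, per the message's own wording.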
2024-11-16T06:27:53,073 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738473073 2024-11-16T06:27:53,073 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:36767, tgtDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738473073, rawTgtDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738473073, srcFsUri=hdfs://localhost:36767, srcDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:27:53,101 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:36767, inputRoot=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:27:53,101 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2111949037_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738473073, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738473073/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-16T06:27:53,103 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-16T06:27:53,107 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738473073/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-16T06:27:53,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742304_1480 (size=156) 2024-11-16T06:27:53,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742304_1480 (size=156) 2024-11-16T06:27:53,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742304_1480 (size=156) 2024-11-16T06:27:53,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742305_1481 (size=951) 2024-11-16T06:27:53,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742305_1481 (size=951) 2024-11-16T06:27:53,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742305_1481 (size=951) 2024-11-16T06:27:53,119 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:53,119 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:53,119 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:54,045 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop-15370842122787757019.jar 2024-11-16T06:27:54,046 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:54,046 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:54,115 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop-13884705263174333710.jar 2024-11-16T06:27:54,116 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:54,116 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:54,116 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:54,116 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:54,116 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:54,117 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:27:54,117 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-16T06:27:54,117 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-16T06:27:54,117 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-16T06:27:54,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-16T06:27:54,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-16T06:27:54,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-16T06:27:54,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-16T06:27:54,119 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-16T06:27:54,119 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-16T06:27:54,119 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-16T06:27:54,119 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-16T06:27:54,120 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-11-16T06:27:54,120 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:27:54,120 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:27:54,120 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:27:54,120 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:27:54,121 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:27:54,121 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:27:54,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742306_1482 (size=131440) 2024-11-16T06:27:54,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742306_1482 (size=131440) 2024-11-16T06:27:54,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742306_1482 (size=131440) 2024-11-16T06:27:54,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742307_1483 (size=4188619) 2024-11-16T06:27:54,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742307_1483 (size=4188619) 2024-11-16T06:27:54,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742307_1483 (size=4188619) 2024-11-16T06:27:54,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742308_1484 (size=1323991) 2024-11-16T06:27:54,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742308_1484 (size=1323991) 2024-11-16T06:27:54,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742308_1484 (size=1323991) 2024-11-16T06:27:54,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33197 is added to blk_1073742309_1485 (size=903732) 2024-11-16T06:27:54,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742309_1485 (size=903732) 2024-11-16T06:27:54,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742309_1485 (size=903732) 2024-11-16T06:27:54,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742310_1486 (size=8360083) 2024-11-16T06:27:54,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742310_1486 (size=8360083) 2024-11-16T06:27:54,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742310_1486 (size=8360083) 2024-11-16T06:27:54,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742311_1487 (size=1877034) 2024-11-16T06:27:54,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742311_1487 (size=1877034) 2024-11-16T06:27:54,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742311_1487 (size=1877034) 2024-11-16T06:27:54,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742312_1488 (size=77835) 2024-11-16T06:27:54,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742312_1488 (size=77835) 2024-11-16T06:27:54,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742312_1488 (size=77835) 2024-11-16T06:27:54,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742313_1489 (size=30949) 2024-11-16T06:27:54,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742313_1489 (size=30949) 2024-11-16T06:27:54,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742313_1489 (size=30949) 2024-11-16T06:27:54,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742314_1490 (size=1597327) 2024-11-16T06:27:54,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742314_1490 (size=1597327) 2024-11-16T06:27:54,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742314_1490 (size=1597327) 2024-11-16T06:27:54,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742315_1491 (size=440656) 2024-11-16T06:27:54,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742315_1491 (size=440656) 2024-11-16T06:27:54,405 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742315_1491 (size=440656) 2024-11-16T06:27:54,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742316_1492 (size=4695811) 2024-11-16T06:27:54,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742316_1492 (size=4695811) 2024-11-16T06:27:54,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742316_1492 (size=4695811) 2024-11-16T06:27:54,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742317_1493 (size=232957) 2024-11-16T06:27:54,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742317_1493 (size=232957) 2024-11-16T06:27:54,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742317_1493 (size=232957) 2024-11-16T06:27:54,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742318_1494 (size=127628) 2024-11-16T06:27:54,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742318_1494 (size=127628) 2024-11-16T06:27:54,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742318_1494 (size=127628) 2024-11-16T06:27:54,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742319_1495 (size=20406) 2024-11-16T06:27:54,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742319_1495 (size=20406) 2024-11-16T06:27:54,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742319_1495 (size=20406) 2024-11-16T06:27:54,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742320_1496 (size=5175431) 2024-11-16T06:27:54,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742320_1496 (size=5175431) 2024-11-16T06:27:54,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742320_1496 (size=5175431) 2024-11-16T06:27:54,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742321_1497 (size=217634) 2024-11-16T06:27:54,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742321_1497 (size=217634) 2024-11-16T06:27:54,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742321_1497 (size=217634) 2024-11-16T06:27:54,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742322_1498 (size=1832290) 2024-11-16T06:27:54,485 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742322_1498 (size=1832290) 2024-11-16T06:27:54,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742322_1498 (size=1832290) 2024-11-16T06:27:54,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742323_1499 (size=322274) 2024-11-16T06:27:54,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742323_1499 (size=322274) 2024-11-16T06:27:54,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742323_1499 (size=322274) 2024-11-16T06:27:54,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742324_1500 (size=503880) 2024-11-16T06:27:54,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742324_1500 (size=503880) 2024-11-16T06:27:54,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742324_1500 (size=503880) 2024-11-16T06:27:54,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742325_1501 (size=29229) 2024-11-16T06:27:54,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742325_1501 (size=29229) 2024-11-16T06:27:54,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742325_1501 (size=29229) 2024-11-16T06:27:54,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742326_1502 (size=24096) 2024-11-16T06:27:54,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742326_1502 (size=24096) 2024-11-16T06:27:54,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742326_1502 (size=24096) 2024-11-16T06:27:54,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742327_1503 (size=111872) 2024-11-16T06:27:54,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742327_1503 (size=111872) 2024-11-16T06:27:54,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742327_1503 (size=111872) 2024-11-16T06:27:54,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742328_1504 (size=6424738) 2024-11-16T06:27:54,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742328_1504 (size=6424738) 2024-11-16T06:27:54,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742328_1504 (size=6424738) 2024-11-16T06:27:54,553 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742329_1505 (size=45609) 2024-11-16T06:27:54,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742329_1505 (size=45609) 2024-11-16T06:27:54,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742329_1505 (size=45609) 2024-11-16T06:27:54,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742330_1506 (size=136454) 2024-11-16T06:27:54,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742330_1506 (size=136454) 2024-11-16T06:27:54,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742330_1506 (size=136454) 2024-11-16T06:27:54,561 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-16T06:27:54,563 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-11-16T06:27:54,565 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=33.2 K 2024-11-16T06:27:54,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742331_1507 (size=714) 2024-11-16T06:27:54,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742331_1507 (size=714) 2024-11-16T06:27:54,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742331_1507 (size=714) 2024-11-16T06:27:54,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742332_1508 (size=15) 2024-11-16T06:27:54,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742332_1508 (size=15) 2024-11-16T06:27:54,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742332_1508 (size=15) 2024-11-16T06:27:54,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742333_1509 (size=303728) 2024-11-16T06:27:54,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742333_1509 (size=303728) 2024-11-16T06:27:54,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742333_1509 (size=303728) 2024-11-16T06:27:57,929 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-16T06:27:57,929 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-16T06:27:57,933 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0008_000001 (auth:SIMPLE) from 127.0.0.1:56714 2024-11-16T06:27:57,948 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_2/usercache/jenkins/appcache/application_1731738285782_0008/container_1731738285782_0008_01_000001/launch_container.sh] 2024-11-16T06:27:57,948 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_2/usercache/jenkins/appcache/application_1731738285782_0008/container_1731738285782_0008_01_000001/container_tokens] 2024-11-16T06:27:57,948 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_2/usercache/jenkins/appcache/application_1731738285782_0008/container_1731738285782_0008_01_000001/sysfs] 2024-11-16T06:27:58,642 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0009_000001 (auth:SIMPLE) from 127.0.0.1:37658 2024-11-16T06:28:04,273 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0009_000001 (auth:SIMPLE) from 127.0.0.1:49848 2024-11-16T06:28:04,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742334_1510 (size=349378) 2024-11-16T06:28:04,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742334_1510 (size=349378) 2024-11-16T06:28:04,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742334_1510 (size=349378) 2024-11-16T06:28:05,512 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5da91dc6c7f65b5b577c46271ab8bfb7, had cached 0 bytes from a total of 6310 2024-11-16T06:28:05,512 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 558bc9b497211de804309b98bfd4a1bc, had cached 0 bytes from a total of 14449 2024-11-16T06:28:06,486 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0009_000001 (auth:SIMPLE) from 127.0.0.1:48826 2024-11-16T06:28:06,746 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-16T06:28:09,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742335_1511 (size=14449) 2024-11-16T06:28:09,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742335_1511 (size=14449) 2024-11-16T06:28:09,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742335_1511 (size=14449) 2024-11-16T06:28:09,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742336_1512 (size=8031) 2024-11-16T06:28:09,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742336_1512 (size=8031) 2024-11-16T06:28:09,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742336_1512 (size=8031) 2024-11-16T06:28:09,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742337_1513 (size=6310) 2024-11-16T06:28:09,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742337_1513 (size=6310) 2024-11-16T06:28:09,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742337_1513 (size=6310) 2024-11-16T06:28:09,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742338_1514 (size=5242) 2024-11-16T06:28:09,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742338_1514 (size=5242) 2024-11-16T06:28:09,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742338_1514 (size=5242) 2024-11-16T06:28:09,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742339_1515 (size=17459) 2024-11-16T06:28:09,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742339_1515 (size=17459) 2024-11-16T06:28:09,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742339_1515 (size=17459) 2024-11-16T06:28:09,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742340_1516 (size=462) 2024-11-16T06:28:09,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742340_1516 (size=462) 2024-11-16T06:28:09,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742340_1516 (size=462) 2024-11-16T06:28:09,838 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_1/usercache/jenkins/appcache/application_1731738285782_0009/container_1731738285782_0009_01_000002/launch_container.sh] 2024-11-16T06:28:09,839 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_1/usercache/jenkins/appcache/application_1731738285782_0009/container_1731738285782_0009_01_000002/container_tokens] 2024-11-16T06:28:09,839 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_1/usercache/jenkins/appcache/application_1731738285782_0009/container_1731738285782_0009_01_000002/sysfs] 2024-11-16T06:28:09,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742341_1517 (size=17459) 2024-11-16T06:28:09,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742341_1517 (size=17459) 2024-11-16T06:28:09,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742341_1517 (size=17459) 2024-11-16T06:28:09,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742342_1518 (size=349378) 2024-11-16T06:28:09,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742342_1518 (size=349378) 2024-11-16T06:28:09,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742342_1518 (size=349378) 2024-11-16T06:28:09,913 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0009_000001 (auth:SIMPLE) from 127.0.0.1:50398 2024-11-16T06:28:11,759 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-16T06:28:11,759 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
2024-11-16T06:28:11,765 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportWithChecksum 2024-11-16T06:28:11,765 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-16T06:28:11,766 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-16T06:28:11,766 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2111949037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-16T06:28:11,766 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-16T06:28:11,766 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-16T06:28:11,766 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2111949037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738473073/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738473073/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-16T06:28:11,766 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738473073/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-16T06:28:11,766 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738473073/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-16T06:28:11,772 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-11-16T06:28:11,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=186, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-11-16T06:28:11,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=186 2024-11-16T06:28:11,775 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738491775"}]},"ts":"1731738491775"} 2024-11-16T06:28:11,777 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-11-16T06:28:11,777 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-11-16T06:28:11,778 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): 
Initialized subprocedures=[{pid=187, ppid=186, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-11-16T06:28:11,780 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=188, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5da91dc6c7f65b5b577c46271ab8bfb7, UNASSIGN}, {pid=189, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=558bc9b497211de804309b98bfd4a1bc, UNASSIGN}] 2024-11-16T06:28:11,781 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=189, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=558bc9b497211de804309b98bfd4a1bc, UNASSIGN 2024-11-16T06:28:11,781 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=188, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5da91dc6c7f65b5b577c46271ab8bfb7, UNASSIGN 2024-11-16T06:28:11,782 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=189 updating hbase:meta row=558bc9b497211de804309b98bfd4a1bc, regionState=CLOSING, regionLocation=3456ee6a3164,37533,1731738278939 2024-11-16T06:28:11,783 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=188 updating hbase:meta row=5da91dc6c7f65b5b577c46271ab8bfb7, regionState=CLOSING, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:28:11,784 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=189, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=558bc9b497211de804309b98bfd4a1bc, UNASSIGN because future has completed 2024-11-16T06:28:11,784 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-16T06:28:11,784 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE, hasLock=false; CloseRegionProcedure 558bc9b497211de804309b98bfd4a1bc, server=3456ee6a3164,37533,1731738278939}] 2024-11-16T06:28:11,784 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=188, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5da91dc6c7f65b5b577c46271ab8bfb7, UNASSIGN because future has completed 2024-11-16T06:28:11,785 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-16T06:28:11,785 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=191, ppid=188, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5da91dc6c7f65b5b577c46271ab8bfb7, server=3456ee6a3164,35939,1731738278639}] 2024-11-16T06:28:11,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=186 2024-11-16T06:28:11,937 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] 
handler.UnassignRegionHandler(122): Close 558bc9b497211de804309b98bfd4a1bc 2024-11-16T06:28:11,937 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-16T06:28:11,937 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] handler.UnassignRegionHandler(122): Close 5da91dc6c7f65b5b577c46271ab8bfb7 2024-11-16T06:28:11,937 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1722): Closing 558bc9b497211de804309b98bfd4a1bc, disabling compactions & flushes 2024-11-16T06:28:11,937 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-16T06:28:11,937 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc. 2024-11-16T06:28:11,937 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc. 2024-11-16T06:28:11,937 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1722): Closing 5da91dc6c7f65b5b577c46271ab8bfb7, disabling compactions & flushes 2024-11-16T06:28:11,937 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc. after waiting 0 ms 2024-11-16T06:28:11,937 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc. 2024-11-16T06:28:11,937 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7. 2024-11-16T06:28:11,937 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7. 2024-11-16T06:28:11,937 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7. after waiting 0 ms 2024-11-16T06:28:11,937 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7. 
2024-11-16T06:28:11,941 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/5da91dc6c7f65b5b577c46271ab8bfb7/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T06:28:11,941 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T06:28:11,942 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:28:11,942 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:28:11,942 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7. 2024-11-16T06:28:11,942 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc. 2024-11-16T06:28:11,942 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1676): Region close journal for 558bc9b497211de804309b98bfd4a1bc: Waiting for close lock at 1731738491937Running coprocessor pre-close hooks at 1731738491937Disabling compacts and flushes for region at 1731738491937Disabling writes for close at 1731738491937Writing region close event to WAL at 1731738491938 (+1 ms)Running coprocessor post-close hooks at 1731738491942 (+4 ms)Closed at 1731738491942 2024-11-16T06:28:11,942 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1676): Region close journal for 5da91dc6c7f65b5b577c46271ab8bfb7: Waiting for close lock at 1731738491937Running coprocessor pre-close hooks at 1731738491937Disabling compacts and flushes for region at 1731738491937Disabling writes for close at 1731738491937Writing region close event to WAL at 1731738491938 (+1 ms)Running coprocessor post-close hooks at 1731738491942 (+4 ms)Closed at 1731738491942 2024-11-16T06:28:11,943 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] handler.UnassignRegionHandler(157): Closed 558bc9b497211de804309b98bfd4a1bc 2024-11-16T06:28:11,944 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=189 updating hbase:meta row=558bc9b497211de804309b98bfd4a1bc, regionState=CLOSED 2024-11-16T06:28:11,944 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] handler.UnassignRegionHandler(157): Closed 5da91dc6c7f65b5b577c46271ab8bfb7 2024-11-16T06:28:11,945 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=188 updating hbase:meta row=5da91dc6c7f65b5b577c46271ab8bfb7, regionState=CLOSED 2024-11-16T06:28:11,946 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): 
Going to wake up procedure pid=190, ppid=189, state=RUNNABLE, hasLock=false; CloseRegionProcedure 558bc9b497211de804309b98bfd4a1bc, server=3456ee6a3164,37533,1731738278939 because future has completed 2024-11-16T06:28:11,946 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=191, ppid=188, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5da91dc6c7f65b5b577c46271ab8bfb7, server=3456ee6a3164,35939,1731738278639 because future has completed 2024-11-16T06:28:11,948 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=190, resume processing ppid=189 2024-11-16T06:28:11,949 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=191, resume processing ppid=188 2024-11-16T06:28:11,949 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, ppid=188, state=SUCCESS, hasLock=false; CloseRegionProcedure 5da91dc6c7f65b5b577c46271ab8bfb7, server=3456ee6a3164,35939,1731738278639 in 162 msec 2024-11-16T06:28:11,949 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=189, ppid=187, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=558bc9b497211de804309b98bfd4a1bc, UNASSIGN in 168 msec 2024-11-16T06:28:11,950 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=189, state=SUCCESS, hasLock=false; CloseRegionProcedure 558bc9b497211de804309b98bfd4a1bc, server=3456ee6a3164,37533,1731738278939 in 163 msec 2024-11-16T06:28:11,950 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=188, resume processing ppid=187 2024-11-16T06:28:11,950 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, ppid=187, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5da91dc6c7f65b5b577c46271ab8bfb7, UNASSIGN in 169 msec 2024-11-16T06:28:11,952 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=187, resume processing ppid=186 2024-11-16T06:28:11,952 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=187, ppid=186, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 173 msec 2024-11-16T06:28:11,953 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738491953"}]},"ts":"1731738491953"} 2024-11-16T06:28:11,954 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-11-16T06:28:11,955 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-11-16T06:28:11,957 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 183 msec 2024-11-16T06:28:12,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=186 2024-11-16T06:28:12,094 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-16T06:28:12,095 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.HMaster$5(2570): 
Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-11-16T06:28:12,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=192, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-16T06:28:12,097 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=192, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-16T06:28:12,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-11-16T06:28:12,097 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=192, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-16T06:28:12,100 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-11-16T06:28:12,102 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/5da91dc6c7f65b5b577c46271ab8bfb7 2024-11-16T06:28:12,103 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc 2024-11-16T06:28:12,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-16T06:28:12,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-16T06:28:12,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-16T06:28:12,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-16T06:28:12,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:28:12,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:28:12,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/acl 2024-11-16T06:28:12,107 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data null 2024-11-16T06:28:12,107 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/5da91dc6c7f65b5b577c46271ab8bfb7/cf, FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/5da91dc6c7f65b5b577c46271ab8bfb7/recovered.edits] 2024-11-16T06:28:12,107 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-16T06:28:12,107 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data null 2024-11-16T06:28:12,107 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-16T06:28:12,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:28:12,107 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data null 2024-11-16T06:28:12,107 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-16T06:28:12,107 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data null 2024-11-16T06:28:12,107 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-16T06:28:12,108 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:28:12,108 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:28:12,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=192 2024-11-16T06:28:12,108 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc/cf, FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc/recovered.edits] 2024-11-16T06:28:12,108 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 
2024-11-16T06:28:12,109 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:28:12,111 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/5da91dc6c7f65b5b577c46271ab8bfb7/cf/1a85f4e371b342ba88e4820303b9f3bc to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportWithChecksum/5da91dc6c7f65b5b577c46271ab8bfb7/cf/1a85f4e371b342ba88e4820303b9f3bc 2024-11-16T06:28:12,111 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc/cf/b1c58290c3b64cc5bed1585f12723457 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc/cf/b1c58290c3b64cc5bed1585f12723457 2024-11-16T06:28:12,113 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/5da91dc6c7f65b5b577c46271ab8bfb7/recovered.edits/9.seqid to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportWithChecksum/5da91dc6c7f65b5b577c46271ab8bfb7/recovered.edits/9.seqid 2024-11-16T06:28:12,113 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc/recovered.edits/9.seqid to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc/recovered.edits/9.seqid 2024-11-16T06:28:12,114 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/558bc9b497211de804309b98bfd4a1bc 2024-11-16T06:28:12,114 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportWithChecksum/5da91dc6c7f65b5b577c46271ab8bfb7 2024-11-16T06:28:12,114 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-11-16T06:28:12,114 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-11-16T06:28:12,115 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf] 2024-11-16T06:28:12,118 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b202411161fdcecca1d9d4c28b4f34e220f8234be_558bc9b497211de804309b98bfd4a1bc to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b202411161fdcecca1d9d4c28b4f34e220f8234be_558bc9b497211de804309b98bfd4a1bc 2024-11-16T06:28:12,119 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241116317b1b81e9994119b86f1379270821a1_5da91dc6c7f65b5b577c46271ab8bfb7 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241116317b1b81e9994119b86f1379270821a1_5da91dc6c7f65b5b577c46271ab8bfb7 2024-11-16T06:28:12,120 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-11-16T06:28:12,122 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=192, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-16T06:28:12,125 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-11-16T06:28:12,127 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithChecksum' descriptor. 2024-11-16T06:28:12,129 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=192, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-16T06:28:12,129 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithChecksum' from region states. 
2024-11-16T06:28:12,129 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731738492129"}]},"ts":"9223372036854775807"} 2024-11-16T06:28:12,129 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731738492129"}]},"ts":"9223372036854775807"} 2024-11-16T06:28:12,131 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-16T06:28:12,131 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 5da91dc6c7f65b5b577c46271ab8bfb7, NAME => 'testtb-testExportWithChecksum,,1731738440098.5da91dc6c7f65b5b577c46271ab8bfb7.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 558bc9b497211de804309b98bfd4a1bc, NAME => 'testtb-testExportWithChecksum,1,1731738440098.558bc9b497211de804309b98bfd4a1bc.', STARTKEY => '1', ENDKEY => ''}] 2024-11-16T06:28:12,131 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithChecksum' as deleted. 2024-11-16T06:28:12,132 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731738492131"}]},"ts":"9223372036854775807"} 2024-11-16T06:28:12,134 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-11-16T06:28:12,135 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=192, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-16T06:28:12,136 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=192, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 40 msec 2024-11-16T06:28:12,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=192 2024-11-16T06:28:12,215 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithChecksum 2024-11-16T06:28:12,215 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-16T06:28:12,227 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-11-16T06:28:12,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-11-16T06:28:12,231 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" type: DISABLED 2024-11-16T06:28:12,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-11-16T06:28:12,261 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=805 (was 
807), OpenFileDescriptor=808 (was 817), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=496 (was 447) - SystemLoadAverage LEAK? -, ProcessCount=15 (was 15), AvailableMemoryMB=2220 (was 2023) - AvailableMemoryMB LEAK? - 2024-11-16T06:28:12,261 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-11-16T06:28:12,282 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=805, OpenFileDescriptor=808, MaxFileDescriptor=1048576, SystemLoadAverage=496, ProcessCount=15, AvailableMemoryMB=2219 2024-11-16T06:28:12,282 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-11-16T06:28:12,284 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T06:28:12,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=193, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:12,286 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T06:28:12,287 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 193 2024-11-16T06:28:12,287 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T06:28:12,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-16T06:28:12,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742343_1519 (size=454) 2024-11-16T06:28:12,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742343_1519 (size=454) 2024-11-16T06:28:12,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742343_1519 (size=454) 2024-11-16T06:28:12,297 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d7ccc746561a2906614c37d63eb65d7e, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', 
{TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:28:12,298 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => a838fb955540fe2b189db5690cef5605, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:28:12,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742344_1520 (size=79) 2024-11-16T06:28:12,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742344_1520 (size=79) 2024-11-16T06:28:12,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742344_1520 (size=79) 2024-11-16T06:28:12,317 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:28:12,317 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing d7ccc746561a2906614c37d63eb65d7e, disabling compactions & flushes 2024-11-16T06:28:12,317 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e. 2024-11-16T06:28:12,317 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e. 2024-11-16T06:28:12,317 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e. 
after waiting 0 ms 2024-11-16T06:28:12,317 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e. 2024-11-16T06:28:12,317 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e. 2024-11-16T06:28:12,317 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for d7ccc746561a2906614c37d63eb65d7e: Waiting for close lock at 1731738492317Disabling compacts and flushes for region at 1731738492317Disabling writes for close at 1731738492317Writing region close event to WAL at 1731738492317Closed at 1731738492317 2024-11-16T06:28:12,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742345_1521 (size=79) 2024-11-16T06:28:12,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742345_1521 (size=79) 2024-11-16T06:28:12,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742345_1521 (size=79) 2024-11-16T06:28:12,342 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:28:12,342 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing a838fb955540fe2b189db5690cef5605, disabling compactions & flushes 2024-11-16T06:28:12,343 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605. 2024-11-16T06:28:12,343 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605. 2024-11-16T06:28:12,343 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605. after waiting 0 ms 2024-11-16T06:28:12,343 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605. 2024-11-16T06:28:12,343 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605. 
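For context, the CREATE request logged above corresponds roughly to the client-side sketch below: a MOB-enabled table with a single 'cf' family (MOB_THRESHOLD 0, VERSIONS 1) and one split key '1', which yields the two regions ('' to '1' and '1' to '') initialized above. The connection setup, class name, and main method are illustrative assumptions, not part of the test code.

```java
// Hypothetical sketch of the table-create call seen in the master log above.
// Assumes a reachable cluster and default HBase client configuration.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
      // Column family 'cf' with MOB enabled and threshold 0, matching the descriptor in the log.
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
          .setMobEnabled(true)
          .setMobThreshold(0L)
          .setMaxVersions(1)
          .build());
      // One split key '1' produces the two regions created by RegionOpenAndInit above.
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(table.build(), splitKeys);
    }
  }
}
```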
2024-11-16T06:28:12,343 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for a838fb955540fe2b189db5690cef5605: Waiting for close lock at 1731738492342Disabling compacts and flushes for region at 1731738492342Disabling writes for close at 1731738492343 (+1 ms)Writing region close event to WAL at 1731738492343Closed at 1731738492343 2024-11-16T06:28:12,344 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T06:28:12,344 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1731738492344"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731738492344"}]},"ts":"1731738492344"} 2024-11-16T06:28:12,344 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1731738492344"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731738492344"}]},"ts":"1731738492344"} 2024-11-16T06:28:12,346 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-16T06:28:12,347 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T06:28:12,347 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738492347"}]},"ts":"1731738492347"} 2024-11-16T06:28:12,349 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-11-16T06:28:12,349 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {3456ee6a3164=0} racks are {/default-rack=0} 2024-11-16T06:28:12,350 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-16T06:28:12,350 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-16T06:28:12,350 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-16T06:28:12,350 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-16T06:28:12,350 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-16T06:28:12,350 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-16T06:28:12,350 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-16T06:28:12,350 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-16T06:28:12,350 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-16T06:28:12,350 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-16T06:28:12,351 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=194, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d7ccc746561a2906614c37d63eb65d7e, ASSIGN}, {pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a838fb955540fe2b189db5690cef5605, ASSIGN}] 2024-11-16T06:28:12,352 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=194, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d7ccc746561a2906614c37d63eb65d7e, ASSIGN 2024-11-16T06:28:12,352 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a838fb955540fe2b189db5690cef5605, ASSIGN 2024-11-16T06:28:12,353 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a838fb955540fe2b189db5690cef5605, ASSIGN; state=OFFLINE, location=3456ee6a3164,40155,1731738278871; forceNewPlan=false, retain=false 2024-11-16T06:28:12,353 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=194, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d7ccc746561a2906614c37d63eb65d7e, ASSIGN; state=OFFLINE, location=3456ee6a3164,35939,1731738278639; forceNewPlan=false, retain=false 2024-11-16T06:28:12,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-16T06:28:12,503 INFO [3456ee6a3164:39381 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
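At this point the master has planned assignment of both regions (pid=194, pid=195). A hypothetical client-side check of the resulting layout could look like the sketch below; the polling loop, class scaffolding, and printed format are assumptions for illustration only.

```java
// Hypothetical sketch: wait for the table and list its regions, which should show
// the two ranges ['', '1') and ['1', '') assigned in the log above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckRegionAssignment {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      while (!admin.isTableAvailable(table)) {
        Thread.sleep(100);  // wait for CREATE_TABLE_ASSIGN_REGIONS to complete
      }
      for (RegionInfo region : admin.getRegions(table)) {
        System.out.println(region.getEncodedName() + " ["
            + Bytes.toStringBinary(region.getStartKey()) + ", "
            + Bytes.toStringBinary(region.getEndKey()) + ")");
      }
    }
  }
}
```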
2024-11-16T06:28:12,504 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=194 updating hbase:meta row=d7ccc746561a2906614c37d63eb65d7e, regionState=OPENING, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:28:12,504 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=195 updating hbase:meta row=a838fb955540fe2b189db5690cef5605, regionState=OPENING, regionLocation=3456ee6a3164,40155,1731738278871 2024-11-16T06:28:12,505 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=194, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d7ccc746561a2906614c37d63eb65d7e, ASSIGN because future has completed 2024-11-16T06:28:12,506 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=196, ppid=194, state=RUNNABLE, hasLock=false; OpenRegionProcedure d7ccc746561a2906614c37d63eb65d7e, server=3456ee6a3164,35939,1731738278639}] 2024-11-16T06:28:12,506 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a838fb955540fe2b189db5690cef5605, ASSIGN because future has completed 2024-11-16T06:28:12,507 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=197, ppid=195, state=RUNNABLE, hasLock=false; OpenRegionProcedure a838fb955540fe2b189db5690cef5605, server=3456ee6a3164,40155,1731738278871}] 2024-11-16T06:28:12,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-16T06:28:12,660 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e. 2024-11-16T06:28:12,660 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(7752): Opening region: {ENCODED => d7ccc746561a2906614c37d63eb65d7e, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e.', STARTKEY => '', ENDKEY => '1'} 2024-11-16T06:28:12,661 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e. service=AccessControlService 2024-11-16T06:28:12,661 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605. 2024-11-16T06:28:12,661 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-16T06:28:12,661 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(7752): Opening region: {ENCODED => a838fb955540fe2b189db5690cef5605, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605.', STARTKEY => '1', ENDKEY => ''} 2024-11-16T06:28:12,661 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp d7ccc746561a2906614c37d63eb65d7e 2024-11-16T06:28:12,661 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605. service=AccessControlService 2024-11-16T06:28:12,661 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:28:12,661 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-16T06:28:12,661 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(7794): checking encryption for d7ccc746561a2906614c37d63eb65d7e 2024-11-16T06:28:12,661 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(7797): checking classloading for d7ccc746561a2906614c37d63eb65d7e 2024-11-16T06:28:12,661 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp a838fb955540fe2b189db5690cef5605 2024-11-16T06:28:12,661 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T06:28:12,661 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(7794): checking encryption for a838fb955540fe2b189db5690cef5605 2024-11-16T06:28:12,661 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(7797): checking classloading for a838fb955540fe2b189db5690cef5605 2024-11-16T06:28:12,662 INFO [StoreOpener-d7ccc746561a2906614c37d63eb65d7e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d7ccc746561a2906614c37d63eb65d7e 2024-11-16T06:28:12,662 INFO [StoreOpener-a838fb955540fe2b189db5690cef5605-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, 
cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a838fb955540fe2b189db5690cef5605 2024-11-16T06:28:12,664 INFO [StoreOpener-d7ccc746561a2906614c37d63eb65d7e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d7ccc746561a2906614c37d63eb65d7e columnFamilyName cf 2024-11-16T06:28:12,664 INFO [StoreOpener-a838fb955540fe2b189db5690cef5605-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a838fb955540fe2b189db5690cef5605 columnFamilyName cf 2024-11-16T06:28:12,664 DEBUG [StoreOpener-a838fb955540fe2b189db5690cef5605-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:28:12,664 DEBUG [StoreOpener-d7ccc746561a2906614c37d63eb65d7e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:28:12,665 INFO [StoreOpener-a838fb955540fe2b189db5690cef5605-1 {}] regionserver.HStore(327): Store=a838fb955540fe2b189db5690cef5605/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:28:12,665 INFO [StoreOpener-d7ccc746561a2906614c37d63eb65d7e-1 {}] regionserver.HStore(327): Store=d7ccc746561a2906614c37d63eb65d7e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T06:28:12,665 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1038): replaying wal for a838fb955540fe2b189db5690cef5605 2024-11-16T06:28:12,665 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1038): replaying wal for d7ccc746561a2906614c37d63eb65d7e 2024-11-16T06:28:12,665 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/d7ccc746561a2906614c37d63eb65d7e 2024-11-16T06:28:12,665 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/a838fb955540fe2b189db5690cef5605 2024-11-16T06:28:12,666 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/d7ccc746561a2906614c37d63eb65d7e 2024-11-16T06:28:12,666 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/a838fb955540fe2b189db5690cef5605 2024-11-16T06:28:12,666 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1048): stopping wal replay for d7ccc746561a2906614c37d63eb65d7e 2024-11-16T06:28:12,666 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1048): stopping wal replay for a838fb955540fe2b189db5690cef5605 2024-11-16T06:28:12,666 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1060): Cleaning up temporary data for d7ccc746561a2906614c37d63eb65d7e 2024-11-16T06:28:12,666 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1060): Cleaning up temporary data for a838fb955540fe2b189db5690cef5605 2024-11-16T06:28:12,667 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1093): writing seq id for d7ccc746561a2906614c37d63eb65d7e 2024-11-16T06:28:12,667 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1093): writing seq id for a838fb955540fe2b189db5690cef5605 2024-11-16T06:28:12,668 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/d7ccc746561a2906614c37d63eb65d7e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:28:12,668 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/a838fb955540fe2b189db5690cef5605/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T06:28:12,668 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1114): Opened a838fb955540fe2b189db5690cef5605; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68474309, 
jitterRate=0.020346716046333313}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:28:12,668 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1114): Opened d7ccc746561a2906614c37d63eb65d7e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70114543, jitterRate=0.04478810727596283}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T06:28:12,669 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a838fb955540fe2b189db5690cef5605 2024-11-16T06:28:12,669 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d7ccc746561a2906614c37d63eb65d7e 2024-11-16T06:28:12,669 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1006): Region open journal for d7ccc746561a2906614c37d63eb65d7e: Running coprocessor pre-open hook at 1731738492661Writing region info on filesystem at 1731738492661Initializing all the Stores at 1731738492662 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738492662Cleaning up temporary data from old regions at 1731738492666 (+4 ms)Running coprocessor post-open hooks at 1731738492669 (+3 ms)Region opened successfully at 1731738492669 2024-11-16T06:28:12,669 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1006): Region open journal for a838fb955540fe2b189db5690cef5605: Running coprocessor pre-open hook at 1731738492661Writing region info on filesystem at 1731738492661Initializing all the Stores at 1731738492662 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731738492662Cleaning up temporary data from old regions at 1731738492666 (+4 ms)Running coprocessor post-open hooks at 1731738492669 (+3 ms)Region opened successfully at 1731738492669 2024-11-16T06:28:12,670 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605., pid=197, masterSystemTime=1731738492659 2024-11-16T06:28:12,670 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e., pid=196, masterSystemTime=1731738492658 2024-11-16T06:28:12,671 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] 
regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605. 2024-11-16T06:28:12,671 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605. 2024-11-16T06:28:12,672 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=195 updating hbase:meta row=a838fb955540fe2b189db5690cef5605, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,40155,1731738278871 2024-11-16T06:28:12,672 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e. 2024-11-16T06:28:12,672 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e. 2024-11-16T06:28:12,672 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=194 updating hbase:meta row=d7ccc746561a2906614c37d63eb65d7e, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:28:12,673 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=197, ppid=195, state=RUNNABLE, hasLock=false; OpenRegionProcedure a838fb955540fe2b189db5690cef5605, server=3456ee6a3164,40155,1731738278871 because future has completed 2024-11-16T06:28:12,674 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=196, ppid=194, state=RUNNABLE, hasLock=false; OpenRegionProcedure d7ccc746561a2906614c37d63eb65d7e, server=3456ee6a3164,35939,1731738278639 because future has completed 2024-11-16T06:28:12,675 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=197, resume processing ppid=195 2024-11-16T06:28:12,676 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, ppid=195, state=SUCCESS, hasLock=false; OpenRegionProcedure a838fb955540fe2b189db5690cef5605, server=3456ee6a3164,40155,1731738278871 in 167 msec 2024-11-16T06:28:12,676 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=196, resume processing ppid=194 2024-11-16T06:28:12,676 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, ppid=194, state=SUCCESS, hasLock=false; OpenRegionProcedure d7ccc746561a2906614c37d63eb65d7e, server=3456ee6a3164,35939,1731738278639 in 169 msec 2024-11-16T06:28:12,677 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, ppid=193, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a838fb955540fe2b189db5690cef5605, ASSIGN in 325 msec 2024-11-16T06:28:12,678 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=194, resume processing ppid=193 2024-11-16T06:28:12,678 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=194, ppid=193, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d7ccc746561a2906614c37d63eb65d7e, ASSIGN in 325 msec 2024-11-16T06:28:12,678 INFO [PEWorker-5 {}] 
procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T06:28:12,678 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738492678"}]},"ts":"1731738492678"} 2024-11-16T06:28:12,680 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-11-16T06:28:12,680 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T06:28:12,681 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-11-16T06:28:12,683 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-16T06:28:12,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:28:12,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:28:12,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:28:12,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:28:12,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-16T06:28:13,115 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:28:13,115 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:28:13,115 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-16T06:28:13,115 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data 
PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-16T06:28:13,115 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:28:13,116 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-16T06:28:13,116 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:28:13,116 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-16T06:28:13,116 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=193, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 829 msec 2024-11-16T06:28:13,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-16T06:28:13,424 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-16T06:28:13,424 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-16T06:28:13,427 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:13,427 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e. 
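The "jenkins: RWXCA" entry written above is created automatically for the table creator during CREATE_TABLE_POST_OPERATION by the AccessController coprocessor; the ZK watcher notifications simply propagate it to each region server's permission cache. For reference, an explicit grant of the same permissions through the client API might look like the following sketch (connection setup and class name are illustrative assumptions; security must be enabled on the cluster).

```java
// Hypothetical sketch of an explicit table-level grant equivalent to the owner
// entry "jenkins: RWXCA" recorded in hbase:acl above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermissions {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // R=READ, W=WRITE, X=EXEC, C=CREATE, A=ADMIN, i.e. the "RWXCA" shown in the log.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}
```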
2024-11-16T06:28:13,428 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T06:28:13,430 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-16T06:28:13,438 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-16T06:28:13,445 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-16T06:28:13,448 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-16T06:28:13,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731738493448 (current time:1731738493448). 2024-11-16T06:28:13,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-16T06:28:13,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-16T06:28:13,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-16T06:28:13,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59aaa6fa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:28:13,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:28:13,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:28:13,450 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:28:13,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:28:13,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:28:13,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4947c2ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:28:13,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:28:13,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:28:13,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:28:13,453 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52986, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:28:13,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35ca81b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:28:13,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:28:13,466 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:28:13,466 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:28:13,467 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47288, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:28:13,469 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381. 
2024-11-16T06:28:13,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:28:13,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:28:13,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:28:13,469 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:28:13,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@544a4e1a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:28:13,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:28:13,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:28:13,474 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:28:13,474 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:28:13,475 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:28:13,475 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ad814b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:28:13,475 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:28:13,475 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:28:13,475 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:28:13,476 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53010, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:28:13,477 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b0b4d51, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:28:13,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:28:13,479 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:28:13,479 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:28:13,481 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47290, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:28:13,483 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338., hostname=3456ee6a3164,35939,1731738278639, seqNum=2] 2024-11-16T06:28:13,483 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:28:13,484 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42956, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:28:13,486 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381. 
2024-11-16T06:28:13,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:28:13,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:28:13,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:28:13,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-16T06:28:13,488 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:28:13,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
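The request being validated above ({ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }) is what a client issues through the Admin API before the master stores the SnapshotProcedure below. A minimal sketch, assuming a standard client configuration and not taken from the test source, follows.

```java
// Hypothetical sketch of the FLUSH-type snapshot request seen in the master log.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Matches "{ ss=emptySnaptb0-... table=... type=FLUSH ttl=0 }" from the log above.
      admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp",
          TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
          SnapshotType.FLUSH);
    }
  }
}
```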
2024-11-16T06:28:13,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=198, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-16T06:28:13,491 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-16T06:28:13,492 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-16T06:28:13,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 198 2024-11-16T06:28:13,495 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-16T06:28:13,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-16T06:28:13,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742346_1522 (size=203) 2024-11-16T06:28:13,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742346_1522 (size=203) 2024-11-16T06:28:13,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742346_1522 (size=203) 2024-11-16T06:28:13,526 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-16T06:28:13,526 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d7ccc746561a2906614c37d63eb65d7e}, {pid=200, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a838fb955540fe2b189db5690cef5605}] 2024-11-16T06:28:13,528 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=199, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d7ccc746561a2906614c37d63eb65d7e 2024-11-16T06:28:13,528 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=200, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a838fb955540fe2b189db5690cef5605 2024-11-16T06:28:13,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-16T06:28:13,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=200 2024-11-16T06:28:13,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=199 2024-11-16T06:28:13,681 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605. 2024-11-16T06:28:13,681 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.HRegion(2603): Flush status journal for a838fb955540fe2b189db5690cef5605: 2024-11-16T06:28:13,681 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-16T06:28:13,681 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e. 2024-11-16T06:28:13,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.HRegion(2603): Flush status journal for d7ccc746561a2906614c37d63eb65d7e: 2024-11-16T06:28:13,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-16T06:28:13,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:13,681 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:13,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:28:13,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-16T06:28:13,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:28:13,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-16T06:28:13,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742347_1523 (size=82) 2024-11-16T06:28:13,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742347_1523 (size=82) 2024-11-16T06:28:13,700 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e. 2024-11-16T06:28:13,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742347_1523 (size=82) 2024-11-16T06:28:13,700 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=199 2024-11-16T06:28:13,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=199 2024-11-16T06:28:13,701 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region d7ccc746561a2906614c37d63eb65d7e 2024-11-16T06:28:13,701 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=199, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d7ccc746561a2906614c37d63eb65d7e 2024-11-16T06:28:13,703 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=198, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d7ccc746561a2906614c37d63eb65d7e in 176 msec 2024-11-16T06:28:13,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742348_1524 (size=82) 2024-11-16T06:28:13,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742348_1524 (size=82) 2024-11-16T06:28:13,720 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605. 
2024-11-16T06:28:13,720 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=200 2024-11-16T06:28:13,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742348_1524 (size=82) 2024-11-16T06:28:13,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=200 2024-11-16T06:28:13,721 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region a838fb955540fe2b189db5690cef5605 2024-11-16T06:28:13,721 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=200, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a838fb955540fe2b189db5690cef5605 2024-11-16T06:28:13,723 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=200, resume processing ppid=198 2024-11-16T06:28:13,723 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, ppid=198, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a838fb955540fe2b189db5690cef5605 in 196 msec 2024-11-16T06:28:13,723 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-16T06:28:13,724 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-16T06:28:13,725 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-16T06:28:13,725 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-16T06:28:13,725 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:28:13,726 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-16T06:28:13,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742349_1525 (size=74) 2024-11-16T06:28:13,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742349_1525 (size=74) 2024-11-16T06:28:13,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742349_1525 (size=74) 2024-11-16T06:28:13,746 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-16T06:28:13,746 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:13,747 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:13,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742350_1526 (size=697) 2024-11-16T06:28:13,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742350_1526 (size=697) 2024-11-16T06:28:13,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742350_1526 (size=697) 2024-11-16T06:28:13,778 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-16T06:28:13,786 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-16T06:28:13,787 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:13,788 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-16T06:28:13,788 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 198 2024-11-16T06:28:13,790 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 300 msec 2024-11-16T06:28:13,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-16T06:28:13,815 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-16T06:28:13,823 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35939 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e. with WAL disabled. Data may be lost in the event of a crash. 2024-11-16T06:28:13,825 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40155 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605. with WAL disabled. Data may be lost in the event of a crash. 2024-11-16T06:28:13,827 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-16T06:28:13,830 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:13,830 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e. 
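The entries above trace the complete master-side SnapshotProcedure state machine for the FLUSH snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp (pid=198, SNAPSHOT_PREPARE through SNAPSHOT_POST_OPERATION), while the client repeatedly polls "Checking to see if procedure is done". A minimal client-side sketch of requesting such a flush snapshot through the Admin API follows; the connection setup and class name are assumptions for illustration and are not taken from this log.

// Illustrative sketch only: requesting a FLUSH-type snapshot like the ones
// traced above. Connection details are assumptions, not from this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
      // Blocks until the master-side SnapshotProcedure reaches SUCCESS, which is
      // what the repeated "Checking to see if procedure is done" lines reflect.
      admin.snapshot("snaptb0-testExportFileSystemStateWithSkipTmp", table);
    }
  }
}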
2024-11-16T06:28:13,830 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T06:28:13,832 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-16T06:28:13,838 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-16T06:28:13,847 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-16T06:28:13,851 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-16T06:28:13,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731738493851 (current time:1731738493851). 2024-11-16T06:28:13,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-16T06:28:13,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-16T06:28:13,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-16T06:28:13,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ab77535, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:28:13,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:28:13,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:28:13,854 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:28:13,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:28:13,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:28:13,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bef9d11, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:28:13,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:28:13,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:28:13,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:28:13,856 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53022, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:28:13,857 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22c053c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:28:13,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:28:13,858 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:28:13,859 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:28:13,860 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47296, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:28:13,861 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381. 
2024-11-16T06:28:13,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:28:13,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:28:13,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:28:13,861 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:28:13,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ba5bfb3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:28:13,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,39381,-1 for getting cluster id 2024-11-16T06:28:13,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T06:28:13,865 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e571bdc5-ba58-4d7b-b324-bb9f590ba66f' 2024-11-16T06:28:13,865 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T06:28:13,865 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e571bdc5-ba58-4d7b-b324-bb9f590ba66f" 2024-11-16T06:28:13,865 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19c78621, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:28:13,865 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [3456ee6a3164,39381,-1] 2024-11-16T06:28:13,865 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T06:28:13,866 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:28:13,867 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53028, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T06:28:13,867 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77fecc6c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T06:28:13,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T06:28:13,869 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,37533,1731738278939, seqNum=-1] 2024-11-16T06:28:13,870 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:28:13,871 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47298, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:28:13,873 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338., hostname=3456ee6a3164,35939,1731738278639, seqNum=2] 2024-11-16T06:28:13,873 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T06:28:13,874 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42962, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T06:28:13,875 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381. 
2024-11-16T06:28:13,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T06:28:13,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:28:13,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:28:13,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-16T06:28:13,876 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T06:28:13,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-16T06:28:13,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=201, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-16T06:28:13,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 201 2024-11-16T06:28:13,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-16T06:28:13,880 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-16T06:28:13,881 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-16T06:28:13,884 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-16T06:28:13,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742351_1527 (size=198) 2024-11-16T06:28:13,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742351_1527 (size=198) 2024-11-16T06:28:13,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742351_1527 (size=198) 2024-11-16T06:28:13,900 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-16T06:28:13,900 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=202, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d7ccc746561a2906614c37d63eb65d7e}, {pid=203, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a838fb955540fe2b189db5690cef5605}] 2024-11-16T06:28:13,901 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=203, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a838fb955540fe2b189db5690cef5605 2024-11-16T06:28:13,901 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=201, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d7ccc746561a2906614c37d63eb65d7e 2024-11-16T06:28:13,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-16T06:28:14,054 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=203 2024-11-16T06:28:14,055 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605. 2024-11-16T06:28:14,055 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=202 2024-11-16T06:28:14,055 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e. 2024-11-16T06:28:14,055 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegion(2902): Flushing a838fb955540fe2b189db5690cef5605 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-16T06:28:14,055 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2902): Flushing d7ccc746561a2906614c37d63eb65d7e 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-16T06:28:14,088 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111691299903d08c4225aab494599e36dd08_d7ccc746561a2906614c37d63eb65d7e is 71, key is 028ae734b878f32596b152d2bc646da2/cf:q/1731738493823/Put/seqid=0 2024-11-16T06:28:14,090 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241116ba0cc4ef52f64f2e918dad9e059fe3b5_a838fb955540fe2b189db5690cef5605 is 71, key is 11e5740c64526fdfb4e209fa6f8d102c/cf:q/1731738493825/Put/seqid=0 2024-11-16T06:28:14,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742352_1528 (size=5102) 2024-11-16T06:28:14,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742352_1528 (size=5102) 2024-11-16T06:28:14,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742352_1528 (size=5102) 2024-11-16T06:28:14,108 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
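The flushes above write each region's data twice: the cell values land as a MOB file under mobdir/.tmp (later renamed into mobdir/data/...), and a small regular store file with MOB reference cells is committed under the region's cf directory. That split only happens for a MOB-enabled column family. Below is a minimal sketch, assuming a MOB threshold chosen purely for illustration, of declaring such a family; the test's actual table setup is not shown in this log.

// Illustrative sketch: declaring a MOB-enabled 'cf' family like the one being
// flushed above. The threshold value is an assumption for illustration.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class MobTableSketch {
  static void createMobTable(Admin admin) throws java.io.IOException {
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("cf"))
        .setMobEnabled(true)   // cell values above the threshold are written under mobdir/
        .setMobThreshold(0L)   // assumption: treat every cell as a MOB cell
        .build();
    admin.createTable(TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))
        .setColumnFamily(cf)
        .build());
  }
}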
2024-11-16T06:28:14,114 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111691299903d08c4225aab494599e36dd08_d7ccc746561a2906614c37d63eb65d7e to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e2024111691299903d08c4225aab494599e36dd08_d7ccc746561a2906614c37d63eb65d7e 2024-11-16T06:28:14,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/d7ccc746561a2906614c37d63eb65d7e/.tmp/cf/a0c21785daa94642b93a2263ab93e903, store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=d7ccc746561a2906614c37d63eb65d7e] 2024-11-16T06:28:14,116 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/d7ccc746561a2906614c37d63eb65d7e/.tmp/cf/a0c21785daa94642b93a2263ab93e903 is 220, key is 0707d8e7f430270443446b960f8c3479c/cf:q/1731738493823/Put/seqid=0 2024-11-16T06:28:14,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742353_1529 (size=8172) 2024-11-16T06:28:14,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742353_1529 (size=8172) 2024-11-16T06:28:14,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742353_1529 (size=8172) 2024-11-16T06:28:14,128 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:28:14,133 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241116ba0cc4ef52f64f2e918dad9e059fe3b5_a838fb955540fe2b189db5690cef5605 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241116ba0cc4ef52f64f2e918dad9e059fe3b5_a838fb955540fe2b189db5690cef5605 2024-11-16T06:28:14,135 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/a838fb955540fe2b189db5690cef5605/.tmp/cf/0beac15d05e64540955d2dc86a222bf7, 
store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=a838fb955540fe2b189db5690cef5605] 2024-11-16T06:28:14,135 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/a838fb955540fe2b189db5690cef5605/.tmp/cf/0beac15d05e64540955d2dc86a222bf7 is 220, key is 10ee92962f4d8906d18a4b70fa60dda7b/cf:q/1731738493825/Put/seqid=0 2024-11-16T06:28:14,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742354_1530 (size=5962) 2024-11-16T06:28:14,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742354_1530 (size=5962) 2024-11-16T06:28:14,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742354_1530 (size=5962) 2024-11-16T06:28:14,148 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/d7ccc746561a2906614c37d63eb65d7e/.tmp/cf/a0c21785daa94642b93a2263ab93e903 2024-11-16T06:28:14,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742355_1531 (size=15527) 2024-11-16T06:28:14,155 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/d7ccc746561a2906614c37d63eb65d7e/.tmp/cf/a0c21785daa94642b93a2263ab93e903 as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/d7ccc746561a2906614c37d63eb65d7e/cf/a0c21785daa94642b93a2263ab93e903 2024-11-16T06:28:14,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742355_1531 (size=15527) 2024-11-16T06:28:14,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742355_1531 (size=15527) 2024-11-16T06:28:14,157 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/a838fb955540fe2b189db5690cef5605/.tmp/cf/0beac15d05e64540955d2dc86a222bf7 2024-11-16T06:28:14,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/a838fb955540fe2b189db5690cef5605/.tmp/cf/0beac15d05e64540955d2dc86a222bf7 as 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/a838fb955540fe2b189db5690cef5605/cf/0beac15d05e64540955d2dc86a222bf7 2024-11-16T06:28:14,170 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/d7ccc746561a2906614c37d63eb65d7e/cf/a0c21785daa94642b93a2263ab93e903, entries=3, sequenceid=6, filesize=5.8 K 2024-11-16T06:28:14,174 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for d7ccc746561a2906614c37d63eb65d7e in 119ms, sequenceid=6, compaction requested=false 2024-11-16T06:28:14,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-11-16T06:28:14,176 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2603): Flush status journal for d7ccc746561a2906614c37d63eb65d7e: 2024-11-16T06:28:14,176 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-16T06:28:14,176 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:14,176 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:28:14,176 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/d7ccc746561a2906614c37d63eb65d7e/cf/a0c21785daa94642b93a2263ab93e903] hfiles 2024-11-16T06:28:14,176 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/d7ccc746561a2906614c37d63eb65d7e/cf/a0c21785daa94642b93a2263ab93e903 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:14,176 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/a838fb955540fe2b189db5690cef5605/cf/0beac15d05e64540955d2dc86a222bf7, entries=47, sequenceid=6, filesize=15.2 K 2024-11-16T06:28:14,178 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for a838fb955540fe2b189db5690cef5605 in 123ms, sequenceid=6, compaction requested=false 2024-11-16T06:28:14,178 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegion(2603): Flush status journal for a838fb955540fe2b189db5690cef5605: 2024-11-16T06:28:14,178 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-16T06:28:14,178 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:14,178 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-16T06:28:14,178 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/a838fb955540fe2b189db5690cef5605/cf/0beac15d05e64540955d2dc86a222bf7] hfiles 2024-11-16T06:28:14,178 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/a838fb955540fe2b189db5690cef5605/cf/0beac15d05e64540955d2dc86a222bf7 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:14,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-16T06:28:14,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742356_1532 (size=121) 2024-11-16T06:28:14,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742356_1532 (size=121) 2024-11-16T06:28:14,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742356_1532 (size=121) 2024-11-16T06:28:14,199 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e. 
2024-11-16T06:28:14,199 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=202 2024-11-16T06:28:14,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=202 2024-11-16T06:28:14,200 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region d7ccc746561a2906614c37d63eb65d7e 2024-11-16T06:28:14,200 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d7ccc746561a2906614c37d63eb65d7e 2024-11-16T06:28:14,202 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=201, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d7ccc746561a2906614c37d63eb65d7e in 301 msec 2024-11-16T06:28:14,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742357_1533 (size=121) 2024-11-16T06:28:14,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742357_1533 (size=121) 2024-11-16T06:28:14,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742357_1533 (size=121) 2024-11-16T06:28:14,213 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605. 
2024-11-16T06:28:14,214 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=203 2024-11-16T06:28:14,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.HMaster(4169): Remote procedure done, pid=203 2024-11-16T06:28:14,214 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region a838fb955540fe2b189db5690cef5605 2024-11-16T06:28:14,214 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=203, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a838fb955540fe2b189db5690cef5605 2024-11-16T06:28:14,218 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=203, resume processing ppid=201 2024-11-16T06:28:14,218 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-16T06:28:14,218 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, ppid=201, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a838fb955540fe2b189db5690cef5605 in 315 msec 2024-11-16T06:28:14,219 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-16T06:28:14,220 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-16T06:28:14,220 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-16T06:28:14,220 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T06:28:14,221 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241116ba0cc4ef52f64f2e918dad9e059fe3b5_a838fb955540fe2b189db5690cef5605, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e2024111691299903d08c4225aab494599e36dd08_d7ccc746561a2906614c37d63eb65d7e] hfiles 2024-11-16T06:28:14,221 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241116ba0cc4ef52f64f2e918dad9e059fe3b5_a838fb955540fe2b189db5690cef5605 2024-11-16T06:28:14,221 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e2024111691299903d08c4225aab494599e36dd08_d7ccc746561a2906614c37d63eb65d7e 2024-11-16T06:28:14,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742358_1534 (size=305) 2024-11-16T06:28:14,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742358_1534 (size=305) 2024-11-16T06:28:14,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742358_1534 (size=305) 2024-11-16T06:28:14,251 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-16T06:28:14,251 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:14,255 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:14,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742359_1535 (size=1007) 2024-11-16T06:28:14,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added 
to blk_1073742359_1535 (size=1007) 2024-11-16T06:28:14,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742359_1535 (size=1007) 2024-11-16T06:28:14,303 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-16T06:28:14,332 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-16T06:28:14,333 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:14,335 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-16T06:28:14,335 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 201 2024-11-16T06:28:14,337 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 458 msec 2024-11-16T06:28:14,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-16T06:28:14,504 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-16T06:28:14,505 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738494505 2024-11-16T06:28:14,505 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:36767, tgtDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738494505, rawTgtDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738494505, srcFsUri=hdfs://localhost:36767, srcDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:28:14,549 DEBUG [Time-limited test {}] 
snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:36767, inputRoot=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7 2024-11-16T06:28:14,549 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2111949037_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738494505, skipTmp=true, initialOutputSnapshotDir=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738494505/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:14,551 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-16T06:28:14,556 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738494505/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:14,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742360_1536 (size=1007) 2024-11-16T06:28:14,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742360_1536 (size=1007) 2024-11-16T06:28:14,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742360_1536 (size=1007) 2024-11-16T06:28:14,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742361_1537 (size=198) 2024-11-16T06:28:14,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742361_1537 (size=198) 2024-11-16T06:28:14,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742361_1537 (size=198) 2024-11-16T06:28:14,601 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:28:14,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:28:14,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:28:15,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop-11252665185214645323.jar 2024-11-16T06:28:15,714 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:28:15,715 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:28:15,781 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop-1375220171999724977.jar 2024-11-16T06:28:15,781 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:28:15,781 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:28:15,781 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:28:15,782 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:28:15,782 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:28:15,782 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-16T06:28:15,782 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-16T06:28:15,782 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-16T06:28:15,783 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 
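
The TableMapReduceUtil(972) entries above and below record the export job resolving, for each dependency class, the jar that will be shipped with the MapReduce job. A minimal client-side sketch of how that resolution is normally requested (an illustrative example, not the test's own code; it assumes only the public TableMapReduceUtil.addDependencyJars(Job) helper):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "snapshot-export-sketch");
        // Locates the jar containing each HBase/Hadoop dependency class and adds it
        // to the job's classpath (distributed cache); the "For class X, using jar Y"
        // debug lines in this log are that resolution being reported.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }
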
2024-11-16T06:28:15,783 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-16T06:28:15,783 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-16T06:28:15,783 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-16T06:28:15,783 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-16T06:28:15,784 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-16T06:28:15,784 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-16T06:28:15,784 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-16T06:28:15,784 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-16T06:28:15,784 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:28:15,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:28:15,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:28:15,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:28:15,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-16T06:28:15,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:28:15,786 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-16T06:28:15,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742362_1538 (size=131440) 2024-11-16T06:28:15,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742362_1538 (size=131440) 2024-11-16T06:28:15,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742362_1538 (size=131440) 2024-11-16T06:28:15,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742363_1539 (size=4188619) 2024-11-16T06:28:15,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742363_1539 (size=4188619) 2024-11-16T06:28:15,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742363_1539 (size=4188619) 2024-11-16T06:28:15,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742364_1540 (size=1323991) 2024-11-16T06:28:15,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742364_1540 (size=1323991) 2024-11-16T06:28:15,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742364_1540 (size=1323991) 2024-11-16T06:28:15,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742365_1541 (size=903732) 2024-11-16T06:28:15,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742365_1541 (size=903732) 2024-11-16T06:28:15,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742365_1541 (size=903732) 2024-11-16T06:28:15,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742366_1542 (size=8360083) 2024-11-16T06:28:15,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added 
to blk_1073742366_1542 (size=8360083) 2024-11-16T06:28:15,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742366_1542 (size=8360083) 2024-11-16T06:28:15,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742367_1543 (size=1877034) 2024-11-16T06:28:15,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742367_1543 (size=1877034) 2024-11-16T06:28:15,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742367_1543 (size=1877034) 2024-11-16T06:28:15,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742368_1544 (size=77835) 2024-11-16T06:28:15,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742368_1544 (size=77835) 2024-11-16T06:28:15,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742368_1544 (size=77835) 2024-11-16T06:28:15,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742369_1545 (size=30949) 2024-11-16T06:28:15,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742369_1545 (size=30949) 2024-11-16T06:28:15,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742369_1545 (size=30949) 2024-11-16T06:28:15,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742370_1546 (size=1597327) 2024-11-16T06:28:15,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742370_1546 (size=1597327) 2024-11-16T06:28:15,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742370_1546 (size=1597327) 2024-11-16T06:28:16,027 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0009_000001 (auth:SIMPLE) from 127.0.0.1:50404 2024-11-16T06:28:16,039 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_2/usercache/jenkins/appcache/application_1731738285782_0009/container_1731738285782_0009_01_000001/launch_container.sh] 2024-11-16T06:28:16,039 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_2/usercache/jenkins/appcache/application_1731738285782_0009/container_1731738285782_0009_01_000001/container_tokens] 2024-11-16T06:28:16,039 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-1_2/usercache/jenkins/appcache/application_1731738285782_0009/container_1731738285782_0009_01_000001/sysfs] 2024-11-16T06:28:16,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742371_1547 (size=4695811) 2024-11-16T06:28:16,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742371_1547 (size=4695811) 2024-11-16T06:28:16,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742371_1547 (size=4695811) 2024-11-16T06:28:16,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742372_1548 (size=232957) 2024-11-16T06:28:16,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742372_1548 (size=232957) 2024-11-16T06:28:16,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742372_1548 (size=232957) 2024-11-16T06:28:16,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742373_1549 (size=127628) 2024-11-16T06:28:16,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742373_1549 (size=127628) 2024-11-16T06:28:16,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742373_1549 (size=127628) 2024-11-16T06:28:16,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742374_1550 (size=20406) 2024-11-16T06:28:16,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742374_1550 (size=20406) 2024-11-16T06:28:16,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742374_1550 (size=20406) 2024-11-16T06:28:16,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742375_1551 (size=5175431) 2024-11-16T06:28:16,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742375_1551 (size=5175431) 2024-11-16T06:28:16,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742375_1551 (size=5175431) 2024-11-16T06:28:16,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742376_1552 (size=6424738) 2024-11-16T06:28:16,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742376_1552 (size=6424738) 2024-11-16T06:28:16,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742376_1552 (size=6424738) 2024-11-16T06:28:16,474 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742377_1553 (size=440656) 2024-11-16T06:28:16,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742377_1553 (size=440656) 2024-11-16T06:28:16,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742377_1553 (size=440656) 2024-11-16T06:28:16,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742378_1554 (size=217634) 2024-11-16T06:28:16,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742378_1554 (size=217634) 2024-11-16T06:28:16,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742378_1554 (size=217634) 2024-11-16T06:28:16,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742379_1555 (size=1832290) 2024-11-16T06:28:16,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742379_1555 (size=1832290) 2024-11-16T06:28:16,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742379_1555 (size=1832290) 2024-11-16T06:28:16,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742380_1556 (size=322274) 2024-11-16T06:28:16,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742380_1556 (size=322274) 2024-11-16T06:28:16,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742380_1556 (size=322274) 2024-11-16T06:28:16,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742381_1557 (size=503880) 2024-11-16T06:28:16,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742381_1557 (size=503880) 2024-11-16T06:28:16,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742381_1557 (size=503880) 2024-11-16T06:28:16,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742382_1558 (size=29229) 2024-11-16T06:28:16,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742382_1558 (size=29229) 2024-11-16T06:28:16,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742382_1558 (size=29229) 2024-11-16T06:28:16,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742383_1559 (size=24096) 2024-11-16T06:28:16,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742383_1559 (size=24096) 2024-11-16T06:28:16,768 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742383_1559 (size=24096) 2024-11-16T06:28:16,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742384_1560 (size=111872) 2024-11-16T06:28:16,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742384_1560 (size=111872) 2024-11-16T06:28:16,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742384_1560 (size=111872) 2024-11-16T06:28:16,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742385_1561 (size=45609) 2024-11-16T06:28:16,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742385_1561 (size=45609) 2024-11-16T06:28:16,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742385_1561 (size=45609) 2024-11-16T06:28:16,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742386_1562 (size=136454) 2024-11-16T06:28:16,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742386_1562 (size=136454) 2024-11-16T06:28:16,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742386_1562 (size=136454) 2024-11-16T06:28:16,862 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
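
The entries that follow show org.apache.hadoop.hbase.snapshot.ExportSnapshot loading the snapshot's hfile list, computing a single export split, and submitting the copy job (skipTmp=true per the earlier ExportSnapshot(1086) line). Below is a hedged sketch of launching an equivalent export with the standard -snapshot/-copy-to options, using the destination path from this log; it is illustrative only and omits the skip-tmp setting the test enables through configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Roughly the programmatic equivalent of:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     -snapshot snaptb0-testExportFileSystemStateWithSkipTmp -copy-to <destination>
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
            "-copy-to", "hdfs://localhost:36767/user/jenkins/test-data/"
                + "87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738494505"
        });
        System.exit(rc);
      }
    }
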
2024-11-16T06:28:16,868 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-11-16T06:28:16,870 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=33.9 K 2024-11-16T06:28:16,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742387_1563 (size=770) 2024-11-16T06:28:16,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742387_1563 (size=770) 2024-11-16T06:28:16,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742387_1563 (size=770) 2024-11-16T06:28:16,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742388_1564 (size=15) 2024-11-16T06:28:16,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742388_1564 (size=15) 2024-11-16T06:28:16,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742388_1564 (size=15) 2024-11-16T06:28:16,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742389_1565 (size=303900) 2024-11-16T06:28:16,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742389_1565 (size=303900) 2024-11-16T06:28:16,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742389_1565 (size=303900) 2024-11-16T06:28:16,934 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-16T06:28:16,935 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-16T06:28:16,941 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0010_000001 (auth:SIMPLE) from 127.0.0.1:37532 2024-11-16T06:28:17,485 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-16T06:28:18,275 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:18,275 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-11-16T06:28:18,276 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-11-16T06:28:22,729 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0010_000001 (auth:SIMPLE) from 127.0.0.1:49668 2024-11-16T06:28:23,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742390_1566 (size=349574) 2024-11-16T06:28:23,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742390_1566 (size=349574) 2024-11-16T06:28:23,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742390_1566 (size=349574) 2024-11-16T06:28:23,778 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-16T06:28:24,955 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0010_000001 (auth:SIMPLE) from 127.0.0.1:58780 2024-11-16T06:28:26,489 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 28ca42946236efd20803c06170fc6c61, had cached 0 bytes from a total of 6088 2024-11-16T06:28:26,491 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 15023cc25d03bb61082803e08a9c5ef3, had cached 0 bytes from a total of 14463 2024-11-16T06:28:27,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742391_1567 (size=15527) 2024-11-16T06:28:27,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742391_1567 (size=15527) 2024-11-16T06:28:27,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742391_1567 (size=15527) 2024-11-16T06:28:27,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742392_1568 (size=8172) 2024-11-16T06:28:27,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742392_1568 (size=8172) 2024-11-16T06:28:27,626 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742392_1568 (size=8172) 2024-11-16T06:28:27,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742393_1569 (size=5962) 2024-11-16T06:28:27,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742393_1569 (size=5962) 2024-11-16T06:28:27,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742393_1569 (size=5962) 2024-11-16T06:28:27,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742394_1570 (size=5102) 2024-11-16T06:28:27,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742394_1570 (size=5102) 2024-11-16T06:28:27,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742394_1570 (size=5102) 2024-11-16T06:28:27,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742395_1571 (size=17473) 2024-11-16T06:28:27,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742395_1571 (size=17473) 2024-11-16T06:28:27,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742395_1571 (size=17473) 2024-11-16T06:28:27,823 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_1/usercache/jenkins/appcache/application_1731738285782_0010/container_1731738285782_0010_01_000002/launch_container.sh] 2024-11-16T06:28:27,823 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_1/usercache/jenkins/appcache/application_1731738285782_0010/container_1731738285782_0010_01_000002/container_tokens] 2024-11-16T06:28:27,823 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_1/usercache/jenkins/appcache/application_1731738285782_0010/container_1731738285782_0010_01_000002/sysfs] 2024-11-16T06:28:28,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742396_1572 (size=476) 2024-11-16T06:28:28,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742396_1572 (size=476) 2024-11-16T06:28:28,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742396_1572 (size=476) 2024-11-16T06:28:28,221 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742397_1573 (size=17473) 2024-11-16T06:28:28,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742397_1573 (size=17473) 2024-11-16T06:28:28,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742397_1573 (size=17473) 2024-11-16T06:28:28,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742398_1574 (size=349574) 2024-11-16T06:28:28,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742398_1574 (size=349574) 2024-11-16T06:28:28,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742398_1574 (size=349574) 2024-11-16T06:28:30,113 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-16T06:28:30,114 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 2024-11-16T06:28:30,120 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:30,120 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-16T06:28:30,121 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-16T06:28:30,121 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2111949037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:30,121 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-11-16T06:28:30,121 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-11-16T06:28:30,121 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2111949037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738494505/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738494505/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:30,121 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738494505/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-11-16T06:28:30,121 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/export-test/export-1731738494505/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-11-16T06:28:30,126 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:30,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=204, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:30,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-16T06:28:30,129 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738510128"}]},"ts":"1731738510128"} 2024-11-16T06:28:30,130 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-11-16T06:28:30,130 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-11-16T06:28:30,131 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=205, ppid=204, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-11-16T06:28:30,132 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=206, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d7ccc746561a2906614c37d63eb65d7e, UNASSIGN}, {pid=207, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a838fb955540fe2b189db5690cef5605, UNASSIGN}] 2024-11-16T06:28:30,132 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=207, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a838fb955540fe2b189db5690cef5605, UNASSIGN 2024-11-16T06:28:30,132 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=206, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d7ccc746561a2906614c37d63eb65d7e, UNASSIGN 2024-11-16T06:28:30,133 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=207 updating hbase:meta row=a838fb955540fe2b189db5690cef5605, regionState=CLOSING, regionLocation=3456ee6a3164,40155,1731738278871 2024-11-16T06:28:30,133 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=206 updating hbase:meta row=d7ccc746561a2906614c37d63eb65d7e, regionState=CLOSING, regionLocation=3456ee6a3164,35939,1731738278639 2024-11-16T06:28:30,134 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=207, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure 
table=testtb-testExportFileSystemStateWithSkipTmp, region=a838fb955540fe2b189db5690cef5605, UNASSIGN because future has completed 2024-11-16T06:28:30,134 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-16T06:28:30,134 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=208, ppid=207, state=RUNNABLE, hasLock=false; CloseRegionProcedure a838fb955540fe2b189db5690cef5605, server=3456ee6a3164,40155,1731738278871}] 2024-11-16T06:28:30,135 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=206, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d7ccc746561a2906614c37d63eb65d7e, UNASSIGN because future has completed 2024-11-16T06:28:30,135 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-16T06:28:30,135 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=209, ppid=206, state=RUNNABLE, hasLock=false; CloseRegionProcedure d7ccc746561a2906614c37d63eb65d7e, server=3456ee6a3164,35939,1731738278639}] 2024-11-16T06:28:30,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-16T06:28:30,286 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] handler.UnassignRegionHandler(122): Close a838fb955540fe2b189db5690cef5605 2024-11-16T06:28:30,286 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-16T06:28:30,287 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1722): Closing a838fb955540fe2b189db5690cef5605, disabling compactions & flushes 2024-11-16T06:28:30,287 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605. 2024-11-16T06:28:30,287 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605. 2024-11-16T06:28:30,287 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605. after waiting 0 ms 2024-11-16T06:28:30,287 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605. 
2024-11-16T06:28:30,287 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] handler.UnassignRegionHandler(122): Close d7ccc746561a2906614c37d63eb65d7e 2024-11-16T06:28:30,287 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-16T06:28:30,287 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1722): Closing d7ccc746561a2906614c37d63eb65d7e, disabling compactions & flushes 2024-11-16T06:28:30,287 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e. 2024-11-16T06:28:30,287 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e. 2024-11-16T06:28:30,287 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e. after waiting 0 ms 2024-11-16T06:28:30,287 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e. 2024-11-16T06:28:30,290 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/a838fb955540fe2b189db5690cef5605/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T06:28:30,290 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/d7ccc746561a2906614c37d63eb65d7e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T06:28:30,291 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:28:30,291 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:28:30,291 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e. 2024-11-16T06:28:30,291 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605. 
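
The region closes above belong to the DISABLE procedure (pid=204) for testtb-testExportFileSystemStateWithSkipTmp; the DELETE procedure (pid=210) with its HFile archiving follows below. A minimal sketch of the client-side Admin calls that drive these server-side procedures (illustrative, not the test's own code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
          admin.disableTable(tn);  // triggers the DisableTableProcedure / region UNASSIGNs seen above
          admin.deleteTable(tn);   // triggers the DeleteTableProcedure / HFile archiving seen below
        }
      }
    }
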
2024-11-16T06:28:30,291 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1676): Region close journal for d7ccc746561a2906614c37d63eb65d7e: Waiting for close lock at 1731738510287Running coprocessor pre-close hooks at 1731738510287Disabling compacts and flushes for region at 1731738510287Disabling writes for close at 1731738510287Writing region close event to WAL at 1731738510288 (+1 ms)Running coprocessor post-close hooks at 1731738510291 (+3 ms)Closed at 1731738510291 2024-11-16T06:28:30,291 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1676): Region close journal for a838fb955540fe2b189db5690cef5605: Waiting for close lock at 1731738510287Running coprocessor pre-close hooks at 1731738510287Disabling compacts and flushes for region at 1731738510287Disabling writes for close at 1731738510287Writing region close event to WAL at 1731738510287Running coprocessor post-close hooks at 1731738510291 (+4 ms)Closed at 1731738510291 2024-11-16T06:28:30,292 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] handler.UnassignRegionHandler(157): Closed d7ccc746561a2906614c37d63eb65d7e 2024-11-16T06:28:30,293 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=206 updating hbase:meta row=d7ccc746561a2906614c37d63eb65d7e, regionState=CLOSED 2024-11-16T06:28:30,293 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] handler.UnassignRegionHandler(157): Closed a838fb955540fe2b189db5690cef5605 2024-11-16T06:28:30,294 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=207 updating hbase:meta row=a838fb955540fe2b189db5690cef5605, regionState=CLOSED 2024-11-16T06:28:30,294 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=209, ppid=206, state=RUNNABLE, hasLock=false; CloseRegionProcedure d7ccc746561a2906614c37d63eb65d7e, server=3456ee6a3164,35939,1731738278639 because future has completed 2024-11-16T06:28:30,295 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=208, ppid=207, state=RUNNABLE, hasLock=false; CloseRegionProcedure a838fb955540fe2b189db5690cef5605, server=3456ee6a3164,40155,1731738278871 because future has completed 2024-11-16T06:28:30,296 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=209, resume processing ppid=206 2024-11-16T06:28:30,296 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, ppid=206, state=SUCCESS, hasLock=false; CloseRegionProcedure d7ccc746561a2906614c37d63eb65d7e, server=3456ee6a3164,35939,1731738278639 in 160 msec 2024-11-16T06:28:30,296 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=208, resume processing ppid=207 2024-11-16T06:28:30,296 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=207, state=SUCCESS, hasLock=false; CloseRegionProcedure a838fb955540fe2b189db5690cef5605, server=3456ee6a3164,40155,1731738278871 in 161 msec 2024-11-16T06:28:30,297 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, ppid=205, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d7ccc746561a2906614c37d63eb65d7e, UNASSIGN in 164 msec 2024-11-16T06:28:30,298 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=207, resume processing ppid=205 2024-11-16T06:28:30,298 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=207, ppid=205, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a838fb955540fe2b189db5690cef5605, UNASSIGN in 164 msec 2024-11-16T06:28:30,299 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=205, resume processing ppid=204 2024-11-16T06:28:30,299 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=204, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 167 msec 2024-11-16T06:28:30,300 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731738510300"}]},"ts":"1731738510300"} 2024-11-16T06:28:30,301 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-11-16T06:28:30,301 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-11-16T06:28:30,303 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 176 msec 2024-11-16T06:28:30,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-16T06:28:30,445 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-16T06:28:30,446 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:30,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=210, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:30,448 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=210, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:30,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:30,448 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=210, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:30,450 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:30,451 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/d7ccc746561a2906614c37d63eb65d7e 2024-11-16T06:28:30,451 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/a838fb955540fe2b189db5690cef5605 2024-11-16T06:28:30,453 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/a838fb955540fe2b189db5690cef5605/cf, FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/a838fb955540fe2b189db5690cef5605/recovered.edits] 2024-11-16T06:28:30,453 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/d7ccc746561a2906614c37d63eb65d7e/cf, FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/d7ccc746561a2906614c37d63eb65d7e/recovered.edits] 2024-11-16T06:28:30,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:30,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:30,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:30,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:30,455 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-16T06:28:30,455 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-16T06:28:30,455 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-16T06:28:30,455 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-16T06:28:30,456 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/a838fb955540fe2b189db5690cef5605/cf/0beac15d05e64540955d2dc86a222bf7 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/a838fb955540fe2b189db5690cef5605/cf/0beac15d05e64540955d2dc86a222bf7 2024-11-16T06:28:30,456 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/d7ccc746561a2906614c37d63eb65d7e/cf/a0c21785daa94642b93a2263ab93e903 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/d7ccc746561a2906614c37d63eb65d7e/cf/a0c21785daa94642b93a2263ab93e903 2024-11-16T06:28:30,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:30,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:30,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:30,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:28:30,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:28:30,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:28:30,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:30,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-16T06:28:30,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=210 2024-11-16T06:28:30,458 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data 
PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:28:30,458 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:28:30,458 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:28:30,458 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-16T06:28:30,459 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/a838fb955540fe2b189db5690cef5605/recovered.edits/9.seqid to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/a838fb955540fe2b189db5690cef5605/recovered.edits/9.seqid 2024-11-16T06:28:30,459 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/d7ccc746561a2906614c37d63eb65d7e/recovered.edits/9.seqid to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/d7ccc746561a2906614c37d63eb65d7e/recovered.edits/9.seqid 2024-11-16T06:28:30,459 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/a838fb955540fe2b189db5690cef5605 2024-11-16T06:28:30,460 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testtb-testExportFileSystemStateWithSkipTmp/d7ccc746561a2906614c37d63eb65d7e 2024-11-16T06:28:30,460 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-11-16T06:28:30,460 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-11-16T06:28:30,460 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf] 2024-11-16T06:28:30,463 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241116ba0cc4ef52f64f2e918dad9e059fe3b5_a838fb955540fe2b189db5690cef5605 to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241116ba0cc4ef52f64f2e918dad9e059fe3b5_a838fb955540fe2b189db5690cef5605 2024-11-16T06:28:30,464 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e2024111691299903d08c4225aab494599e36dd08_d7ccc746561a2906614c37d63eb65d7e to hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e2024111691299903d08c4225aab494599e36dd08_d7ccc746561a2906614c37d63eb65d7e 2024-11-16T06:28:30,464 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-11-16T06:28:30,466 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=210, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:30,468 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-11-16T06:28:30,470 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-11-16T06:28:30,471 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=210, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:30,472 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 
2024-11-16T06:28:30,472 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731738510472"}]},"ts":"9223372036854775807"} 2024-11-16T06:28:30,472 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731738510472"}]},"ts":"9223372036854775807"} 2024-11-16T06:28:30,474 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-16T06:28:30,474 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => d7ccc746561a2906614c37d63eb65d7e, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1731738492283.d7ccc746561a2906614c37d63eb65d7e.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => a838fb955540fe2b189db5690cef5605, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1731738492283.a838fb955540fe2b189db5690cef5605.', STARTKEY => '1', ENDKEY => ''}] 2024-11-16T06:28:30,474 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 2024-11-16T06:28:30,474 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731738510474"}]},"ts":"9223372036854775807"} 2024-11-16T06:28:30,475 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-11-16T06:28:30,476 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=210, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:30,477 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=210, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 30 msec 2024-11-16T06:28:30,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=210 2024-11-16T06:28:30,565 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:30,565 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-16T06:28:30,571 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-16T06:28:30,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:30,574 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-16T06:28:30,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381 {}] snapshot.SnapshotManager(381): Deleting 
snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:30,592 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=809 (was 805) Potentially hanging thread: Thread-7526 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2111949037_22 at /127.0.0.1:55238 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38683 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2111949037_22 at /127.0.0.1:50366 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_866654925_1 at /127.0.0.1:39844 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2111949037_22 at /127.0.0.1:38258 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 136302) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (692179358) connection to localhost/127.0.0.1:38683 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_866654925_1 at /127.0.0.1:53666 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=807 (was 808), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=466 (was 496), ProcessCount=14 (was 15), AvailableMemoryMB=2190 (was 2219) 2024-11-16T06:28:30,592 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=809 is superior to 500 2024-11-16T06:28:30,592 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 
2024-11-16T06:28:30,599 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5fe3aeaf{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-16T06:28:30,601 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7edb01a7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T06:28:30,601 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T06:28:30,602 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ca5bf15{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-16T06:28:30,602 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5342a4dd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop.log.dir/,STOPPED} 2024-11-16T06:28:34,311 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731738285782_0010_000001 (auth:SIMPLE) from 127.0.0.1:35454 2024-11-16T06:28:34,321 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_3/usercache/jenkins/appcache/application_1731738285782_0010/container_1731738285782_0010_01_000001/launch_container.sh] 2024-11-16T06:28:34,321 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_3/usercache/jenkins/appcache/application_1731738285782_0010/container_1731738285782_0010_01_000001/container_tokens] 2024-11-16T06:28:34,321 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596324682/yarn-1194624354/MiniMRCluster_1596324682-localDir-nm-0_3/usercache/jenkins/appcache/application_1731738285782_0010/container_1731738285782_0010_01_000001/sysfs] 2024-11-16T06:28:35,815 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-16T06:28:36,747 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-16T06:28:38,275 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-16T06:28:40,528 WARN [regionserver/3456ee6a3164:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 3, running: 1 2024-11-16T06:28:43,777 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-16T06:28:47,623 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6f7b7652{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-16T06:28:47,624 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ee86169{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T06:28:47,624 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T06:28:47,624 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1662dfcb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-16T06:28:47,624 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e3a7a80{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop.log.dir/,STOPPED} 2024-11-16T06:29:04,641 ERROR [Thread[Thread-403,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-16T06:29:04,641 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4b4553ed{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-16T06:29:04,642 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@60579b67{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T06:29:04,642 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T06:29:04,642 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@55b63922{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-16T06:29:04,643 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b1c64f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop.log.dir/,STOPPED} 2024-11-16T06:29:04,645 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 
2024-11-16T06:29:04,650 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-11-16T06:29:04,650 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-11-16T06:29:04,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741830_1006 (size=974749) 2024-11-16T06:29:04,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741830_1006 (size=974749) 2024-11-16T06:29:04,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741830_1006 (size=974749) 2024-11-16T06:29:04,655 ERROR [Thread[Thread-426,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-16T06:29:04,658 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7000b80c{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-16T06:29:04,658 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2fcaeaf0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T06:29:04,658 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T06:29:04,659 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@d2aeec9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-16T06:29:04,659 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@34f9a9b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop.log.dir/,STOPPED} 2024-11-16T06:29:04,660 ERROR [Thread[Thread-385,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-16T06:29:04,660 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-11-16T06:29:04,660 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T06:29:04,660 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T06:29:04,660 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T06:29:04,660 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:29:04,660 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:29:04,660 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-16T06:29:04,660 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T06:29:04,660 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=296659727, stopped=false 2024-11-16T06:29:04,661 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:29:04,661 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-16T06:29:04,661 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3456ee6a3164,39381,1731738277818 2024-11-16T06:29:04,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T06:29:04,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T06:29:04,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T06:29:04,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T06:29:04,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:29:04,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:29:04,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:29:04,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:29:04,664 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T06:29:04,665 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T06:29:04,665 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T06:29:04,665 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T06:29:04,665 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T06:29:04,665 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:29:04,665 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T06:29:04,665 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3456ee6a3164,35939,1731738278639' ***** 2024-11-16T06:29:04,666 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:29:04,666 DEBUG 
[zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T06:29:04,666 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T06:29:04,666 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3456ee6a3164,40155,1731738278871' ***** 2024-11-16T06:29:04,666 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:29:04,666 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T06:29:04,666 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3456ee6a3164,37533,1731738278939' ***** 2024-11-16T06:29:04,666 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:29:04,666 INFO [RS:0;3456ee6a3164:35939 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T06:29:04,666 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T06:29:04,666 INFO [RS:2;3456ee6a3164:37533 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T06:29:04,666 INFO [RS:0;3456ee6a3164:35939 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T06:29:04,666 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T06:29:04,666 INFO [RS:2;3456ee6a3164:37533 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T06:29:04,666 INFO [RS:0;3456ee6a3164:35939 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T06:29:04,666 INFO [RS:2;3456ee6a3164:37533 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T06:29:04,666 INFO [RS:2;3456ee6a3164:37533 {}] regionserver.HRegionServer(3091): Received CLOSE for 28ca42946236efd20803c06170fc6c61 2024-11-16T06:29:04,666 INFO [RS:0;3456ee6a3164:35939 {}] regionserver.HRegionServer(3091): Received CLOSE for 15023cc25d03bb61082803e08a9c5ef3 2024-11-16T06:29:04,667 INFO [RS:1;3456ee6a3164:40155 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T06:29:04,667 INFO [RS:1;3456ee6a3164:40155 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T06:29:04,667 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T06:29:04,667 INFO [RS:1;3456ee6a3164:40155 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-16T06:29:04,667 INFO [RS:1;3456ee6a3164:40155 {}] regionserver.HRegionServer(959): stopping server 3456ee6a3164,40155,1731738278871 2024-11-16T06:29:04,667 INFO [RS:1;3456ee6a3164:40155 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T06:29:04,667 INFO [RS:2;3456ee6a3164:37533 {}] regionserver.HRegionServer(959): stopping server 3456ee6a3164,37533,1731738278939 2024-11-16T06:29:04,667 INFO [RS:0;3456ee6a3164:35939 {}] regionserver.HRegionServer(3091): Received CLOSE for d0bbbb84adc872abb747d3f48a8c9338 2024-11-16T06:29:04,667 INFO [RS:1;3456ee6a3164:40155 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;3456ee6a3164:40155. 2024-11-16T06:29:04,667 INFO [RS:2;3456ee6a3164:37533 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T06:29:04,667 INFO [RS:0;3456ee6a3164:35939 {}] regionserver.HRegionServer(959): stopping server 3456ee6a3164,35939,1731738278639 2024-11-16T06:29:04,667 INFO [RS:0;3456ee6a3164:35939 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T06:29:04,667 INFO [RS:2;3456ee6a3164:37533 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;3456ee6a3164:37533. 2024-11-16T06:29:04,667 INFO [RS:0;3456ee6a3164:35939 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3456ee6a3164:35939. 2024-11-16T06:29:04,667 DEBUG [RS:1;3456ee6a3164:40155 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T06:29:04,667 DEBUG [RS:1;3456ee6a3164:40155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:29:04,667 DEBUG [RS:0;3456ee6a3164:35939 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at 
org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T06:29:04,667 DEBUG [RS:0;3456ee6a3164:35939 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:29:04,667 DEBUG [RS:2;3456ee6a3164:37533 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T06:29:04,667 DEBUG [RS:2;3456ee6a3164:37533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:29:04,667 INFO [RS:1;3456ee6a3164:40155 {}] regionserver.HRegionServer(976): stopping server 3456ee6a3164,40155,1731738278871; all regions closed. 2024-11-16T06:29:04,667 INFO [RS:0;3456ee6a3164:35939 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-16T06:29:04,667 INFO [RS:2;3456ee6a3164:37533 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T06:29:04,667 INFO [RS:2;3456ee6a3164:37533 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T06:29:04,667 INFO [RS:2;3456ee6a3164:37533 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-16T06:29:04,667 DEBUG [RS:0;3456ee6a3164:35939 {}] regionserver.HRegionServer(1325): Online Regions={15023cc25d03bb61082803e08a9c5ef3=testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3., d0bbbb84adc872abb747d3f48a8c9338=hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338.} 2024-11-16T06:29:04,667 INFO [RS:2;3456ee6a3164:37533 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T06:29:04,668 DEBUG [RS:0;3456ee6a3164:35939 {}] regionserver.HRegionServer(1351): Waiting on 15023cc25d03bb61082803e08a9c5ef3, d0bbbb84adc872abb747d3f48a8c9338 2024-11-16T06:29:04,668 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 28ca42946236efd20803c06170fc6c61, disabling compactions & flushes 2024-11-16T06:29:04,668 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T06:29:04,668 INFO [RS:2;3456ee6a3164:37533 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-16T06:29:04,668 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61. 2024-11-16T06:29:04,668 DEBUG [RS:2;3456ee6a3164:37533 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 28ca42946236efd20803c06170fc6c61=testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61.} 2024-11-16T06:29:04,668 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61. 2024-11-16T06:29:04,668 DEBUG [RS:2;3456ee6a3164:37533 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 28ca42946236efd20803c06170fc6c61 2024-11-16T06:29:04,668 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61. after waiting 0 ms 2024-11-16T06:29:04,668 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61. 
2024-11-16T06:29:04,668 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T06:29:04,668 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T06:29:04,668 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T06:29:04,668 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T06:29:04,668 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T06:29:04,668 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 15023cc25d03bb61082803e08a9c5ef3, disabling compactions & flushes 2024-11-16T06:29:04,668 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=69.66 KB heapSize=111.04 KB 2024-11-16T06:29:04,668 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3. 2024-11-16T06:29:04,668 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3. 2024-11-16T06:29:04,668 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3. after waiting 0 ms 2024-11-16T06:29:04,668 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3. 
2024-11-16T06:29:04,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741835_1011 (size=6842) 2024-11-16T06:29:04,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741835_1011 (size=6842) 2024-11-16T06:29:04,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741835_1011 (size=6842) 2024-11-16T06:29:04,674 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportExpiredSnapshot/28ca42946236efd20803c06170fc6c61/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-16T06:29:04,674 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:29:04,674 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61. 2024-11-16T06:29:04,675 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 28ca42946236efd20803c06170fc6c61: Waiting for close lock at 1731738544667Running coprocessor pre-close hooks at 1731738544667Disabling compacts and flushes for region at 1731738544667Disabling writes for close at 1731738544668 (+1 ms)Writing region close event to WAL at 1731738544668Running coprocessor post-close hooks at 1731738544674 (+6 ms)Closed at 1731738544674 2024-11-16T06:29:04,675 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1731738416146.28ca42946236efd20803c06170fc6c61. 2024-11-16T06:29:04,676 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/default/testExportExpiredSnapshot/15023cc25d03bb61082803e08a9c5ef3/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-16T06:29:04,676 DEBUG [RS:1;3456ee6a3164:40155 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/oldWALs 2024-11-16T06:29:04,676 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:29:04,676 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3. 
2024-11-16T06:29:04,676 INFO [RS:1;3456ee6a3164:40155 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 3456ee6a3164%2C40155%2C1731738278871:(num 1731738280719) 2024-11-16T06:29:04,676 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 15023cc25d03bb61082803e08a9c5ef3: Waiting for close lock at 1731738544667Running coprocessor pre-close hooks at 1731738544668 (+1 ms)Disabling compacts and flushes for region at 1731738544668Disabling writes for close at 1731738544668Writing region close event to WAL at 1731738544669 (+1 ms)Running coprocessor post-close hooks at 1731738544676 (+7 ms)Closed at 1731738544676 2024-11-16T06:29:04,676 DEBUG [RS:1;3456ee6a3164:40155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:29:04,676 INFO [RS:1;3456ee6a3164:40155 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T06:29:04,676 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3. 2024-11-16T06:29:04,676 INFO [RS:1;3456ee6a3164:40155 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T06:29:04,676 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d0bbbb84adc872abb747d3f48a8c9338, disabling compactions & flushes 2024-11-16T06:29:04,676 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338. 2024-11-16T06:29:04,676 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338. 2024-11-16T06:29:04,676 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338. after waiting 0 ms 2024-11-16T06:29:04,676 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338. 2024-11-16T06:29:04,676 INFO [RS:1;3456ee6a3164:40155 {}] hbase.ChoreService(370): Chore service for: regionserver/3456ee6a3164:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T06:29:04,676 INFO [RS:1;3456ee6a3164:40155 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T06:29:04,676 INFO [RS:1;3456ee6a3164:40155 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T06:29:04,676 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing d0bbbb84adc872abb747d3f48a8c9338 1/1 column families, dataSize=1.38 KB heapSize=3.33 KB 2024-11-16T06:29:04,676 INFO [RS:1;3456ee6a3164:40155 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-16T06:29:04,677 INFO [RS:1;3456ee6a3164:40155 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T06:29:04,677 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T06:29:04,677 INFO [RS:1;3456ee6a3164:40155 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40155 2024-11-16T06:29:04,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T06:29:04,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3456ee6a3164,40155,1731738278871 2024-11-16T06:29:04,682 INFO [RS:1;3456ee6a3164:40155 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T06:29:04,683 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3456ee6a3164,40155,1731738278871] 2024-11-16T06:29:04,686 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3456ee6a3164,40155,1731738278871 already deleted, retry=false 2024-11-16T06:29:04,686 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3456ee6a3164,40155,1731738278871 expired; onlineServers=2 2024-11-16T06:29:04,692 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/acl/d0bbbb84adc872abb747d3f48a8c9338/.tmp/l/803264a6d1e842e9b626746056bf6b0a is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1731738413652/DeleteFamily/seqid=0 2024-11-16T06:29:04,694 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740/.tmp/info/e86f0a1f3e5e4b059c8971cf2533f993 is 173, key is testExportExpiredSnapshot,1,1731738416146.15023cc25d03bb61082803e08a9c5ef3./info:regioninfo/1731738416503/Put/seqid=0 2024-11-16T06:29:04,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742399_1575 (size=5695) 2024-11-16T06:29:04,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742399_1575 (size=5695) 2024-11-16T06:29:04,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742399_1575 (size=5695) 2024-11-16T06:29:04,697 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.38 KB at sequenceid=27 (bloomFilter=false), to=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/acl/d0bbbb84adc872abb747d3f48a8c9338/.tmp/l/803264a6d1e842e9b626746056bf6b0a 2024-11-16T06:29:04,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742400_1576 (size=14362) 2024-11-16T06:29:04,699 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742400_1576 (size=14362) 2024-11-16T06:29:04,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742400_1576 (size=14362) 2024-11-16T06:29:04,699 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=59.12 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740/.tmp/info/e86f0a1f3e5e4b059c8971cf2533f993 2024-11-16T06:29:04,702 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 803264a6d1e842e9b626746056bf6b0a 2024-11-16T06:29:04,702 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/acl/d0bbbb84adc872abb747d3f48a8c9338/.tmp/l/803264a6d1e842e9b626746056bf6b0a as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/acl/d0bbbb84adc872abb747d3f48a8c9338/l/803264a6d1e842e9b626746056bf6b0a 2024-11-16T06:29:04,706 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 803264a6d1e842e9b626746056bf6b0a 2024-11-16T06:29:04,707 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/acl/d0bbbb84adc872abb747d3f48a8c9338/l/803264a6d1e842e9b626746056bf6b0a, entries=12, sequenceid=27, filesize=5.6 K 2024-11-16T06:29:04,707 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.38 KB/1412, heapSize ~3.31 KB/3392, currentSize=0 B/0 for d0bbbb84adc872abb747d3f48a8c9338 in 31ms, sequenceid=27, compaction requested=false 2024-11-16T06:29:04,711 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/acl/d0bbbb84adc872abb747d3f48a8c9338/recovered.edits/30.seqid, newMaxSeqId=30, maxSeqId=1 2024-11-16T06:29:04,711 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:29:04,711 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338. 
2024-11-16T06:29:04,711 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d0bbbb84adc872abb747d3f48a8c9338: Waiting for close lock at 1731738544676Running coprocessor pre-close hooks at 1731738544676Disabling compacts and flushes for region at 1731738544676Disabling writes for close at 1731738544676Obtaining lock to block concurrent updates at 1731738544676Preparing flush snapshotting stores in d0bbbb84adc872abb747d3f48a8c9338 at 1731738544676Finished memstore snapshotting hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338., syncing WAL and waiting on mvcc, flushsize=dataSize=1412, getHeapSize=3392, getOffHeapSize=0, getCellsCount=23 at 1731738544677 (+1 ms)Flushing stores of hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338. at 1731738544677Flushing d0bbbb84adc872abb747d3f48a8c9338/l: creating writer at 1731738544677Flushing d0bbbb84adc872abb747d3f48a8c9338/l: appending metadata at 1731738544692 (+15 ms)Flushing d0bbbb84adc872abb747d3f48a8c9338/l: closing flushed file at 1731738544692Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@611012c3: reopening flushed file at 1731738544702 (+10 ms)Finished flush of dataSize ~1.38 KB/1412, heapSize ~3.31 KB/3392, currentSize=0 B/0 for d0bbbb84adc872abb747d3f48a8c9338 in 31ms, sequenceid=27, compaction requested=false at 1731738544707 (+5 ms)Writing region close event to WAL at 1731738544708 (+1 ms)Running coprocessor post-close hooks at 1731738544711 (+3 ms)Closed at 1731738544711 2024-11-16T06:29:04,711 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1731738281754.d0bbbb84adc872abb747d3f48a8c9338. 2024-11-16T06:29:04,719 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740/.tmp/ns/6e9f2f7f144644ac9ed643c64b1c964e is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395467.3649df5adf8edce351b9958fc12f93e3./ns:/1731738413682/DeleteFamily/seqid=0 2024-11-16T06:29:04,720 INFO [regionserver/3456ee6a3164:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T06:29:04,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742401_1577 (size=7779) 2024-11-16T06:29:04,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742401_1577 (size=7779) 2024-11-16T06:29:04,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742401_1577 (size=7779) 2024-11-16T06:29:04,724 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.23 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740/.tmp/ns/6e9f2f7f144644ac9ed643c64b1c964e 2024-11-16T06:29:04,724 INFO [regionserver/3456ee6a3164:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T06:29:04,742 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740/.tmp/rep_barrier/7d2cbf8d94b94b48950d21d73f219760 is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395467.3649df5adf8edce351b9958fc12f93e3./rep_barrier:/1731738413682/DeleteFamily/seqid=0 2024-11-16T06:29:04,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742402_1578 (size=8005) 2024-11-16T06:29:04,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742402_1578 (size=8005) 2024-11-16T06:29:04,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742402_1578 (size=8005) 2024-11-16T06:29:04,747 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.34 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740/.tmp/rep_barrier/7d2cbf8d94b94b48950d21d73f219760 2024-11-16T06:29:04,764 INFO [regionserver/3456ee6a3164:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T06:29:04,765 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740/.tmp/table/4f738e30ea144099b0d0bfbbbc844d8e is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1731738395467.3649df5adf8edce351b9958fc12f93e3./table:/1731738413682/DeleteFamily/seqid=0 2024-11-16T06:29:04,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742403_1579 (size=8758) 2024-11-16T06:29:04,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742403_1579 (size=8758) 2024-11-16T06:29:04,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742403_1579 (size=8758) 2024-11-16T06:29:04,770 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.97 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740/.tmp/table/4f738e30ea144099b0d0bfbbbc844d8e 2024-11-16T06:29:04,774 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740/.tmp/info/e86f0a1f3e5e4b059c8971cf2533f993 as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740/info/e86f0a1f3e5e4b059c8971cf2533f993 2024-11-16T06:29:04,779 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740/info/e86f0a1f3e5e4b059c8971cf2533f993, entries=74, sequenceid=199, filesize=14.0 K 2024-11-16T06:29:04,780 DEBUG 
[RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740/.tmp/ns/6e9f2f7f144644ac9ed643c64b1c964e as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740/ns/6e9f2f7f144644ac9ed643c64b1c964e 2024-11-16T06:29:04,783 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740/ns/6e9f2f7f144644ac9ed643c64b1c964e, entries=23, sequenceid=199, filesize=7.6 K 2024-11-16T06:29:04,784 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740/.tmp/rep_barrier/7d2cbf8d94b94b48950d21d73f219760 as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740/rep_barrier/7d2cbf8d94b94b48950d21d73f219760 2024-11-16T06:29:04,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T06:29:04,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40155-0x100473470b00002, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T06:29:04,785 INFO [RS:1;3456ee6a3164:40155 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T06:29:04,785 INFO [RS:1;3456ee6a3164:40155 {}] regionserver.HRegionServer(1031): Exiting; stopping=3456ee6a3164,40155,1731738278871; zookeeper connection closed. 
2024-11-16T06:29:04,785 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2a0caf45 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2a0caf45 2024-11-16T06:29:04,788 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740/rep_barrier/7d2cbf8d94b94b48950d21d73f219760, entries=21, sequenceid=199, filesize=7.8 K 2024-11-16T06:29:04,789 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740/.tmp/table/4f738e30ea144099b0d0bfbbbc844d8e as hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740/table/4f738e30ea144099b0d0bfbbbc844d8e 2024-11-16T06:29:04,793 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740/table/4f738e30ea144099b0d0bfbbbc844d8e, entries=36, sequenceid=199, filesize=8.6 K 2024-11-16T06:29:04,793 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~69.66 KB/71334, heapSize ~110.98 KB/113640, currentSize=0 B/0 for 1588230740 in 125ms, sequenceid=199, compaction requested=false 2024-11-16T06:29:04,797 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/data/hbase/meta/1588230740/recovered.edits/202.seqid, newMaxSeqId=202, maxSeqId=1 2024-11-16T06:29:04,797 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:29:04,797 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T06:29:04,797 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T06:29:04,797 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731738544668Running coprocessor pre-close hooks at 1731738544668Disabling compacts and flushes for region at 1731738544668Disabling writes for close at 1731738544668Obtaining lock to block concurrent updates at 1731738544668Preparing flush snapshotting stores in 1588230740 at 1731738544668Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=71334, getHeapSize=113640, getOffHeapSize=0, getCellsCount=548 at 1731738544668Flushing stores of hbase:meta,,1.1588230740 at 1731738544669 (+1 ms)Flushing 1588230740/info: creating writer at 1731738544669Flushing 1588230740/info: appending metadata at 1731738544694 (+25 ms)Flushing 1588230740/info: closing flushed file at 1731738544694Flushing 1588230740/ns: creating writer at 
1731738544703 (+9 ms)Flushing 1588230740/ns: appending metadata at 1731738544719 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731738544719Flushing 1588230740/rep_barrier: creating writer at 1731738544727 (+8 ms)Flushing 1588230740/rep_barrier: appending metadata at 1731738544742 (+15 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1731738544742Flushing 1588230740/table: creating writer at 1731738544751 (+9 ms)Flushing 1588230740/table: appending metadata at 1731738544765 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731738544765Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@51a18308: reopening flushed file at 1731738544774 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1aaaae91: reopening flushed file at 1731738544779 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7eac4545: reopening flushed file at 1731738544783 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2d381ead: reopening flushed file at 1731738544788 (+5 ms)Finished flush of dataSize ~69.66 KB/71334, heapSize ~110.98 KB/113640, currentSize=0 B/0 for 1588230740 in 125ms, sequenceid=199, compaction requested=false at 1731738544793 (+5 ms)Writing region close event to WAL at 1731738544795 (+2 ms)Running coprocessor post-close hooks at 1731738544797 (+2 ms)Closed at 1731738544797 2024-11-16T06:29:04,798 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T06:29:04,868 INFO [RS:0;3456ee6a3164:35939 {}] regionserver.HRegionServer(976): stopping server 3456ee6a3164,35939,1731738278639; all regions closed. 2024-11-16T06:29:04,868 INFO [RS:2;3456ee6a3164:37533 {}] regionserver.HRegionServer(976): stopping server 3456ee6a3164,37533,1731738278939; all regions closed. 
2024-11-16T06:29:04,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741833_1009 (size=17048) 2024-11-16T06:29:04,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741833_1009 (size=17048) 2024-11-16T06:29:04,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741833_1009 (size=17048) 2024-11-16T06:29:04,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741836_1012 (size=81723) 2024-11-16T06:29:04,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741836_1012 (size=81723) 2024-11-16T06:29:04,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741836_1012 (size=81723) 2024-11-16T06:29:04,877 DEBUG [RS:2;3456ee6a3164:37533 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/oldWALs 2024-11-16T06:29:04,877 INFO [RS:2;3456ee6a3164:37533 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 3456ee6a3164%2C37533%2C1731738278939.meta:.meta(num 1731738281199) 2024-11-16T06:29:04,878 DEBUG [RS:0;3456ee6a3164:35939 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/oldWALs 2024-11-16T06:29:04,878 INFO [RS:0;3456ee6a3164:35939 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 3456ee6a3164%2C35939%2C1731738278639:(num 1731738280719) 2024-11-16T06:29:04,878 DEBUG [RS:0;3456ee6a3164:35939 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:29:04,878 INFO [RS:0;3456ee6a3164:35939 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T06:29:04,878 INFO [RS:0;3456ee6a3164:35939 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T06:29:04,878 INFO [RS:0;3456ee6a3164:35939 {}] hbase.ChoreService(370): Chore service for: regionserver/3456ee6a3164:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-16T06:29:04,878 INFO [RS:0;3456ee6a3164:35939 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T06:29:04,878 INFO [RS:0;3456ee6a3164:35939 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T06:29:04,878 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T06:29:04,878 INFO [RS:0;3456ee6a3164:35939 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-16T06:29:04,878 INFO [RS:0;3456ee6a3164:35939 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T06:29:04,879 INFO [RS:0;3456ee6a3164:35939 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35939 2024-11-16T06:29:04,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T06:29:04,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3456ee6a3164,35939,1731738278639 2024-11-16T06:29:04,881 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/WALs/3456ee6a3164,37533,1731738278939/3456ee6a3164%2C37533%2C1731738278939.1731738280719 not finished, retry = 0 2024-11-16T06:29:04,881 INFO [RS:0;3456ee6a3164:35939 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T06:29:04,882 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3456ee6a3164,35939,1731738278639] 2024-11-16T06:29:04,884 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3456ee6a3164,35939,1731738278639 already deleted, retry=false 2024-11-16T06:29:04,884 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3456ee6a3164,35939,1731738278639 expired; onlineServers=1 2024-11-16T06:29:04,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073741834_1010 (size=13063) 2024-11-16T06:29:04,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741834_1010 (size=13063) 2024-11-16T06:29:04,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741834_1010 (size=13063) 2024-11-16T06:29:04,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T06:29:04,983 INFO [RS:0;3456ee6a3164:35939 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T06:29:04,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x100473470b00001, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T06:29:04,983 INFO [RS:0;3456ee6a3164:35939 {}] regionserver.HRegionServer(1031): Exiting; stopping=3456ee6a3164,35939,1731738278639; zookeeper connection closed. 
2024-11-16T06:29:04,983 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2202cf66 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2202cf66 2024-11-16T06:29:04,985 DEBUG [RS:2;3456ee6a3164:37533 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/oldWALs 2024-11-16T06:29:04,985 INFO [RS:2;3456ee6a3164:37533 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 3456ee6a3164%2C37533%2C1731738278939:(num 1731738280719) 2024-11-16T06:29:04,985 DEBUG [RS:2;3456ee6a3164:37533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T06:29:04,985 INFO [RS:2;3456ee6a3164:37533 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T06:29:04,985 INFO [RS:2;3456ee6a3164:37533 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T06:29:04,986 INFO [RS:2;3456ee6a3164:37533 {}] hbase.ChoreService(370): Chore service for: regionserver/3456ee6a3164:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-16T06:29:04,986 INFO [RS:2;3456ee6a3164:37533 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T06:29:04,986 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T06:29:04,987 INFO [RS:2;3456ee6a3164:37533 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37533 2024-11-16T06:29:04,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3456ee6a3164,37533,1731738278939 2024-11-16T06:29:04,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T06:29:04,991 INFO [RS:2;3456ee6a3164:37533 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T06:29:04,993 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3456ee6a3164,37533,1731738278939] 2024-11-16T06:29:04,996 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3456ee6a3164,37533,1731738278939 already deleted, retry=false 2024-11-16T06:29:04,996 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3456ee6a3164,37533,1731738278939 expired; onlineServers=0 2024-11-16T06:29:04,996 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3456ee6a3164,39381,1731738277818' ***** 2024-11-16T06:29:04,996 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T06:29:04,997 INFO [M:0;3456ee6a3164:39381 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T06:29:04,997 INFO [M:0;3456ee6a3164:39381 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T06:29:04,997 DEBUG [M:0;3456ee6a3164:39381 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T06:29:04,997 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. 
Exiting. 2024-11-16T06:29:04,997 DEBUG [M:0;3456ee6a3164:39381 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T06:29:04,997 DEBUG [master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.small.0-1731738280366 {}] cleaner.HFileCleaner(306): Exit Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.small.0-1731738280366,5,FailOnTimeoutGroup] 2024-11-16T06:29:04,997 DEBUG [master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.large.0-1731738280363 {}] cleaner.HFileCleaner(306): Exit Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.large.0-1731738280363,5,FailOnTimeoutGroup] 2024-11-16T06:29:04,997 INFO [M:0;3456ee6a3164:39381 {}] hbase.ChoreService(370): Chore service for: master/3456ee6a3164:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T06:29:04,997 INFO [M:0;3456ee6a3164:39381 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T06:29:04,998 DEBUG [M:0;3456ee6a3164:39381 {}] master.HMaster(1795): Stopping service threads 2024-11-16T06:29:04,998 INFO [M:0;3456ee6a3164:39381 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T06:29:04,998 INFO [M:0;3456ee6a3164:39381 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T06:29:04,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T06:29:04,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T06:29:04,999 INFO [M:0;3456ee6a3164:39381 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T06:29:04,999 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-16T06:29:04,999 DEBUG [M:0;3456ee6a3164:39381 {}] zookeeper.ZKUtil(347): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T06:29:04,999 WARN [M:0;3456ee6a3164:39381 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T06:29:05,000 INFO [M:0;3456ee6a3164:39381 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/.lastflushedseqids 2024-11-16T06:29:05,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36185 is added to blk_1073742404_1580 (size=329) 2024-11-16T06:29:05,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073742404_1580 (size=329) 2024-11-16T06:29:05,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073742404_1580 (size=329) 2024-11-16T06:29:05,014 INFO [M:0;3456ee6a3164:39381 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T06:29:05,014 INFO [M:0;3456ee6a3164:39381 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T06:29:05,014 DEBUG [M:0;3456ee6a3164:39381 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T06:29:05,026 INFO [M:0;3456ee6a3164:39381 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T06:29:05,026 DEBUG [M:0;3456ee6a3164:39381 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T06:29:05,026 DEBUG [M:0;3456ee6a3164:39381 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T06:29:05,026 DEBUG [M:0;3456ee6a3164:39381 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T06:29:05,026 INFO [M:0;3456ee6a3164:39381 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=829.43 KB heapSize=994.28 KB 2024-11-16T06:29:05,027 ERROR [AsyncFSWAL-0-hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/MasterData-prefix:3456ee6a3164,39381,1731738277818 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/MasterData-prefix:3456ee6a3164,39381,1731738277818,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T06:29:05,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T06:29:05,094 INFO [RS:2;3456ee6a3164:37533 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T06:29:05,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37533-0x100473470b00003, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T06:29:05,094 INFO [RS:2;3456ee6a3164:37533 {}] regionserver.HRegionServer(1031): Exiting; stopping=3456ee6a3164,37533,1731738278939; zookeeper connection closed. 2024-11-16T06:29:05,094 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@239b5f48 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@239b5f48 2024-11-16T06:29:05,095 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-16T06:29:06,747 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-16T06:29:08,275 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:29:08,276 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T06:29:08,276 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T06:29:08,277 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-11-16T06:29:08,278 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-11-16T06:29:08,279 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:29:08,279 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-16T06:29:08,279 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-16T06:29:10,211 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-16T06:29:36,747 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-16T06:29:39,058 DEBUG [master/3456ee6a3164:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=22, reuseRatio=68.75% 2024-11-16T06:29:39,058 DEBUG [master/3456ee6a3164:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-16T06:29:46,795 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;3456ee6a3164:39381 228 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 14 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@569eb48e Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 16 Waited count: 17 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22ef4a98 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 3341 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 34 Waiting on java.util.concurrent.CountDownLatch$Sync@74291007 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 10438 Waited count: 10951 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 
(org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@4bdee1c2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@13bdc9ab Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@696e100d): State: TIMED_WAITING Blocked count: 0 Waited count: 663 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp274140085-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp274140085-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp274140085-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp274140085-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp274140085-41-acceptor-0@5291bd21-ServerConnector@58f01c16{HTTP/1.1, (http/1.1)}{localhost:40621}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp274140085-42): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp274140085-43): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp274140085-44): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-1e972b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 32 Waited count: 3123 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3ee6fbbf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) 
app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 36767): State: TIMED_WAITING Blocked count: 1 Waited count: 35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@27d6be6a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@54873d29): State: TIMED_WAITING Blocked count: 0 
Waited count: 67 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 111 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 32572 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1359 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31a0b22a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 36767): State: TIMED_WAITING Blocked count: 92 Waited count: 2237 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 36767): State: TIMED_WAITING Blocked count: 112 Waited count: 2241 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 36767): State: TIMED_WAITING Blocked count: 88 Waited count: 2234 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 36767): State: TIMED_WAITING Blocked count: 96 Waited count: 2213 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 36767): State: TIMED_WAITING Blocked count: 84 Waited count: 2224 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@f16ac0b): State: TIMED_WAITING Blocked count: 0 Waited count: 166 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@44c4ec06): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@6f1ce4b8): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@7db6dfd0): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(988407592)): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp535842688-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp535842688-87-acceptor-0@24b9a97b-ServerConnector@605d5872{HTTP/1.1, (http/1.1)}{localhost:44725}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp535842688-88): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp535842688-89): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-54ca9e06-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@661bf352): State: TIMED_WAITING Blocked count: 0 Waited count: 660 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 38399): State: TIMED_WAITING Blocked count: 1 Waited count: 34 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 66 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 259 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3547e7b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-2134272360-172.17.0.2-1731738273472 heartbeating to localhost/127.0.0.1:36767): State: TIMED_WAITING Blocked count: 1316 Waited count: 1419 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@33039414): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) 
java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 330 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 338 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 337 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 330 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 330 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 112 (IPC Client (692179358) connection to localhost/127.0.0.1:36767 from jenkins): State: TIMED_WAITING Blocked count: 1262 Waited count: 1263 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 121 (qtp2004098342-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp2004098342-122-acceptor-0@39b0bac7-ServerConnector@51a04cd6{HTTP/1.1, (http/1.1)}{localhost:34009}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp2004098342-123): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Parameter Sending Thread for localhost/127.0.0.1:36767): State: TIMED_WAITING Blocked count: 0 Waited count: 1922 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp2004098342-124): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-36cb5fd6-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1948be8e): State: TIMED_WAITING Blocked count: 0 Waited count: 659 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 131 (IPC Server idle connection scanner for port 41707): State: TIMED_WAITING Blocked count: 1 Waited count: 34 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 133 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 66 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 2 Waited count: 279 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@50752c30 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-2134272360-172.17.0.2-1731738273472 heartbeating to localhost/127.0.0.1:36767): State: TIMED_WAITING Blocked count: 1306 Waited count: 1399 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2f2fa19f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 132 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 129 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 140 (IPC Server handler 0 on default port 41707): State: TIMED_WAITING Blocked count: 0 Waited count: 333 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 1 on default port 41707): State: TIMED_WAITING Blocked count: 0 Waited count: 334 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 2 on default port 41707): State: TIMED_WAITING Blocked count: 0 Waited count: 330 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 3 on default port 41707): State: TIMED_WAITING Blocked count: 0 Waited count: 332 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 4 on default port 41707): State: TIMED_WAITING Blocked count: 0 Waited count: 330 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 156 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp945376875-157): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp945376875-158-acceptor-0@19c41b27-ServerConnector@198d5352{HTTP/1.1, (http/1.1)}{localhost:34783}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp945376875-159): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp945376875-160): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-61f32d00-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data3)): State: TIMED_WAITING Blocked count: 13 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data1)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data4)): State: TIMED_WAITING Blocked count: 13 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data2)): State: TIMED_WAITING Blocked count: 14 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 173 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data2/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data3/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data1/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data4/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 183 (ForkJoinPool-2-worker-2): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.ForkJoinPool@349c282e Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 184 (ForkJoinPool-2-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 190 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 194 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1605ede6): State: TIMED_WAITING Blocked count: 0 Waited count: 658 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 199 (java.util.concurrent.ThreadPoolExecutor$Worker@47c76425[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (java.util.concurrent.ThreadPoolExecutor$Worker@754696bd[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 203 (IPC Server idle connection scanner for port 43747): State: TIMED_WAITING Blocked count: 1 Waited count: 34 Stack: java.base@17.0.11/java.lang.Object.wait(Native 
Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 205 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 66 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (Command processor): State: WAITING Blocked count: 1 Waited count: 259 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@45bca3c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 209 (BP-2134272360-172.17.0.2-1731738273472 heartbeating to localhost/127.0.0.1:36767): State: TIMED_WAITING Blocked count: 1282 Waited count: 1403 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@13f0d714): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 195 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 211 (IPC Server handler 0 on default port 43747): State: TIMED_WAITING Blocked count: 0 Waited count: 329 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 1 on default port 43747): State: TIMED_WAITING Blocked count: 0 Waited count: 329 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 2 on default port 43747): State: TIMED_WAITING Blocked count: 0 Waited count: 330 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (IPC Server handler 3 on default port 43747): State: TIMED_WAITING Blocked count: 0 Waited count: 329 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 216 (IPC Server handler 4 on default port 43747): State: TIMED_WAITING Blocked count: 0 Waited count: 330 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data6/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data5/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (java.util.concurrent.ThreadPoolExecutor$Worker@45b62b23[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 5 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57949): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 34 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 165 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 344 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aae9da6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:57949):): State: WAITING Blocked count: 2 Waited count: 417 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@68bcfe83 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 453 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@af5893e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 4 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 248 (LeaseRenewer:jenkins@localhost:36767): State: TIMED_WAITING Blocked count: 8 Waited count: 340 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@cef0a61 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 241 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:57949)): State: RUNNABLE Blocked count: 15 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 8 Waited count: 55 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@253260ab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 6 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 16 Waited count: 69 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70e4bb67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 3 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 5 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 6 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 5 Waited count: 82 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39381): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@42c0b18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381): State: WAITING Blocked count: 56 Waited count: 268 Waiting on java.util.concurrent.Semaphore$NonfairSync@42eee269 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381): State: WAITING Blocked count: 165 Waited count: 621 Waiting on java.util.concurrent.Semaphore$NonfairSync@746fe5a7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39381): State: WAITING Blocked count: 53 Waited count: 6424 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@210547ce Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39381): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4515ec3d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39381): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4515ec3d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=39381): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@70dda35c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=39381): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7b14223d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=39381): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@70a421a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=39381): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@1aba4f8b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 291 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@760d8c3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 292 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 332 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 112 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;3456ee6a3164:39381): State: TIMED_WAITING Blocked count: 12 Waited count: 2671 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1104/0x00007f93d0f8e7f8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 353 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 33 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/3456ee6a3164:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 
(master/3456ee6a3164:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (org.apache.hadoop.hdfs.PeerCache@35a8fe16): State: TIMED_WAITING Blocked count: 0 Waited count: 109 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 378 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3254 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 395 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 101 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 396 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 77 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 33 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 32472 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 23 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 21 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 453 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@519a4d41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 475 (regionserver/3456ee6a3164:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5efa3c3a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 476 (regionserver/3456ee6a3164:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6bf3a109 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 479 (regionserver/3456ee6a3164:0.procedureResultReporter): State: WAITING Blocked count: 9 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@592cb5f7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 504 (LeaseRenewer:jenkins.hfs.2@localhost:36767): State: TIMED_WAITING Blocked count: 8 Waited count: 338 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 523 (region-location-0): State: WAITING Blocked count: 11 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f0bd6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 32318 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 555 (ForkJoinPool.commonPool-worker-1): State: TIMED_WAITING Blocked count: 0 Waited count: 633 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 556 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 474 Waiting on java.util.concurrent.ForkJoinPool@554be74b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 565 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 630 Waiting on java.util.concurrent.ForkJoinPool@554be74b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 570 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 582 (region-location-1): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f0bd6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 583 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f0bd6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 988 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 392 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1048 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 61 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f797580 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1503 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@3d5adc02 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2125 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2134 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2135 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2927 (region-location-3): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f0bd6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2928 (region-location-4): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f0bd6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5015 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5016 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5017 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8689 (AsyncFSWAL-1-hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/MasterData-prefix:3456ee6a3164,39381,1731738277818): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@37a1055d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8693 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-11-16T06:30:06,748 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-16T06:30:36,748 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;3456ee6a3164:39381 225 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 14 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@569eb48e Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 16 Waited count: 18 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22ef4a98 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 3941 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.CountDownLatch$Sync@770af6d0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 10438 Waited count: 10952 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@4bdee1c2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@13bdc9ab Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@696e100d): State: TIMED_WAITING Blocked count: 0 Waited count: 783 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp274140085-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp274140085-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp274140085-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp274140085-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp274140085-41-acceptor-0@5291bd21-ServerConnector@58f01c16{HTTP/1.1, (http/1.1)}{localhost:40621}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp274140085-42): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp274140085-43): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp274140085-44): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-1e972b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 32 Waited count: 3123 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3ee6fbbf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 36767): State: TIMED_WAITING Blocked count: 1 Waited 
count: 41 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@27d6be6a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 133 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@54873d29): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 131 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 38499 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1359 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31a0b22a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 36767): State: TIMED_WAITING Blocked count: 92 Waited count: 2297 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 36767): State: TIMED_WAITING Blocked count: 112 Waited count: 2301 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 36767): State: TIMED_WAITING Blocked count: 88 Waited count: 2294 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 36767): State: TIMED_WAITING Blocked count: 96 Waited count: 2273 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 36767): State: TIMED_WAITING Blocked count: 84 Waited count: 2284 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@f16ac0b): State: TIMED_WAITING Blocked count: 0 Waited count: 196 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@44c4ec06): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@6f1ce4b8): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@7db6dfd0): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(988407592)): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp535842688-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp535842688-87-acceptor-0@24b9a97b-ServerConnector@605d5872{HTTP/1.1, (http/1.1)}{localhost:44725}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp535842688-88): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp535842688-89): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-54ca9e06-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@661bf352): State: TIMED_WAITING Blocked count: 0 Waited count: 780 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 38399): State: TIMED_WAITING Blocked count: 1 Waited count: 40 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 78 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 279 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3547e7b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-2134272360-172.17.0.2-1731738273472 heartbeating to localhost/127.0.0.1:36767): State: TIMED_WAITING Blocked count: 1336 Waited count: 1459 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@33039414): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 390 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 398 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 397 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 390 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 390 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 112 (IPC Client (692179358) connection to localhost/127.0.0.1:36767 from jenkins): State: TIMED_WAITING Blocked count: 1322 Waited count: 1323 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 121 (qtp2004098342-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp2004098342-122-acceptor-0@39b0bac7-ServerConnector@51a04cd6{HTTP/1.1, (http/1.1)}{localhost:34009}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) 
app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp2004098342-123): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Parameter Sending Thread for localhost/127.0.0.1:36767): State: TIMED_WAITING Blocked count: 0 Waited count: 1982 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp2004098342-124): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-36cb5fd6-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1948be8e): State: TIMED_WAITING Blocked count: 0 Waited count: 779 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 131 (IPC Server idle connection scanner for port 41707): State: TIMED_WAITING Blocked count: 1 Waited count: 40 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 133 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 78 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 2 Waited count: 299 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@50752c30 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-2134272360-172.17.0.2-1731738273472 heartbeating to localhost/127.0.0.1:36767): State: TIMED_WAITING Blocked count: 1326 Waited count: 1439 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2f2fa19f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 132 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 129 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 140 (IPC Server handler 0 on default port 41707): State: TIMED_WAITING Blocked count: 0 Waited count: 393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 1 on default port 41707): State: TIMED_WAITING Blocked count: 0 Waited count: 394 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 2 on default port 41707): State: TIMED_WAITING Blocked count: 0 Waited count: 390 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 3 on default port 41707): State: TIMED_WAITING Blocked count: 0 Waited count: 392 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 4 on default port 41707): State: TIMED_WAITING Blocked count: 0 Waited count: 390 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 156 (pool-36-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp945376875-157): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp945376875-158-acceptor-0@19c41b27-ServerConnector@198d5352{HTTP/1.1, (http/1.1)}{localhost:34783}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp945376875-159): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp945376875-160): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-61f32d00-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data3)): State: TIMED_WAITING Blocked count: 13 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data1)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data4)): State: TIMED_WAITING Blocked count: 13 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data2)): State: TIMED_WAITING Blocked count: 14 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 173 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data2/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data3/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data1/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data4/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 183 (ForkJoinPool-2-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 190 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 194 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1605ede6): State: TIMED_WAITING Blocked count: 0 Waited count: 778 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 199 (java.util.concurrent.ThreadPoolExecutor$Worker@47c76425[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (java.util.concurrent.ThreadPoolExecutor$Worker@754696bd[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 203 (IPC Server idle connection scanner for port 43747): State: TIMED_WAITING Blocked count: 1 Waited count: 40 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 205 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 78 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (Command processor): State: WAITING Blocked count: 1 Waited count: 279 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@45bca3c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 209 (BP-2134272360-172.17.0.2-1731738273472 heartbeating to localhost/127.0.0.1:36767): State: TIMED_WAITING Blocked count: 1302 Waited count: 1443 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@13f0d714): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 195 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 211 (IPC Server handler 0 on default port 43747): State: TIMED_WAITING Blocked count: 0 Waited count: 389 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 1 on default port 43747): State: TIMED_WAITING Blocked count: 0 Waited count: 389 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 2 on default port 43747): State: TIMED_WAITING Blocked count: 0 Waited count: 390 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (IPC Server handler 3 on default port 43747): State: TIMED_WAITING Blocked count: 0 Waited count: 389 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 216 (IPC Server handler 4 on default port 43747): State: TIMED_WAITING Blocked count: 0 Waited count: 390 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data6/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data5/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (java.util.concurrent.ThreadPoolExecutor$Worker@45b62b23[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 5 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57949): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 195 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 349 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aae9da6 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:57949):): State: WAITING Blocked count: 2 Waited count: 422 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@68bcfe83 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 458 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@af5893e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 4 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@cef0a61 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 273 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:57949)): State: RUNNABLE Blocked count: 15 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 8 Waited count: 55 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@253260ab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 6 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 16 Waited count: 69 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70e4bb67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 3 Waited count: 83 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 5 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 
(NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 6 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 5 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39381): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@42c0b18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381): State: WAITING Blocked count: 56 Waited count: 268 Waiting on java.util.concurrent.Semaphore$NonfairSync@42eee269 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381): State: WAITING Blocked count: 165 Waited count: 621 Waiting on java.util.concurrent.Semaphore$NonfairSync@746fe5a7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39381): State: WAITING Blocked count: 53 Waited count: 6424 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@210547ce Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39381): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4515ec3d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39381): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4515ec3d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=39381): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@70dda35c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=39381): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7b14223d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=39381): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@70a421a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=39381): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@1aba4f8b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 291 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@760d8c3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 292 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 332 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 112 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;3456ee6a3164:39381): State: TIMED_WAITING Blocked count: 12 Waited count: 2671 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1104/0x00007f93d0f8e7f8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) 
app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 353 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 39 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/3456ee6a3164:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (master/3456ee6a3164:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (org.apache.hadoop.hdfs.PeerCache@35a8fe16): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 378 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3854 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 395 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 101 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 396 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 77 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 146 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@456e0a56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 39 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 38475 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 23 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 21 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 453 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@519a4d41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 475 (regionserver/3456ee6a3164:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5efa3c3a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 476 (regionserver/3456ee6a3164:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6bf3a109 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 479 (regionserver/3456ee6a3164:0.procedureResultReporter): State: WAITING Blocked count: 9 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@592cb5f7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 523 (region-location-0): State: WAITING Blocked count: 11 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f0bd6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 38321 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 556 (ForkJoinPool.commonPool-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 475 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 565 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 630 Waiting on java.util.concurrent.ForkJoinPool@554be74b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 570 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 582 (region-location-1): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f0bd6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 583 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f0bd6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 988 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 398 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1048 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 61 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f797580 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1503 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@3d5adc02 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2125 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2134 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2135 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2927 (region-location-3): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f0bd6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2928 (region-location-4): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f0bd6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5015 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5016 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5017 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8689 (AsyncFSWAL-1-hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/MasterData-prefix:3456ee6a3164,39381,1731738277818): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@37a1055d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8693 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 8694 (java.util.concurrent.ThreadPoolExecutor$Worker@4ba5d436[State = -1, empty queue]): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1cdb9eff Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-11-16T06:31:06,748 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T06:31:36,749 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;3456ee6a3164:39381 223 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 14 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@569eb48e Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 16 Waited count: 19 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 22 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22ef4a98 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4540 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 46 Waiting on java.util.concurrent.CountDownLatch$Sync@236febd4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 10438 Waited count: 10953 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 
(org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@4bdee1c2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@13bdc9ab Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@696e100d): State: TIMED_WAITING Blocked count: 0 Waited count: 903 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp274140085-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp274140085-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp274140085-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp274140085-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp274140085-41-acceptor-0@5291bd21-ServerConnector@58f01c16{HTTP/1.1, (http/1.1)}{localhost:40621}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp274140085-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp274140085-43): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp274140085-44): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-1e972b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 32 Waited count: 3123 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3ee6fbbf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) 
app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 36767): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@27d6be6a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 153 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@54873d29): State: TIMED_WAITING Blocked count: 0 
Waited count: 91 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 151 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 44424 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1359 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31a0b22a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 36767): State: TIMED_WAITING Blocked count: 92 Waited count: 2357 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 36767): State: TIMED_WAITING Blocked count: 112 Waited count: 2362 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 36767): State: TIMED_WAITING Blocked count: 88 Waited count: 2355 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 36767): State: TIMED_WAITING Blocked count: 96 Waited count: 2333 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 36767): State: TIMED_WAITING Blocked count: 84 Waited count: 2344 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@f16ac0b): State: TIMED_WAITING Blocked count: 0 Waited count: 226 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@44c4ec06): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@6f1ce4b8): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@7db6dfd0): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(988407592)): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp535842688-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp535842688-87-acceptor-0@24b9a97b-ServerConnector@605d5872{HTTP/1.1, (http/1.1)}{localhost:44725}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp535842688-88): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp535842688-89): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-54ca9e06-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@661bf352): State: TIMED_WAITING Blocked count: 0 Waited count: 900 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 38399): State: TIMED_WAITING Blocked count: 1 Waited count: 46 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 90 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 299 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3547e7b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-2134272360-172.17.0.2-1731738273472 heartbeating to localhost/127.0.0.1:36767): State: TIMED_WAITING Blocked count: 1356 Waited count: 1499 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@33039414): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) 
java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 450 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 473 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 477 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 450 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 450 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 112 (IPC Client (692179358) connection to localhost/127.0.0.1:36767 from jenkins): State: TIMED_WAITING Blocked count: 1382 Waited count: 1383 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 121 (qtp2004098342-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp2004098342-122-acceptor-0@39b0bac7-ServerConnector@51a04cd6{HTTP/1.1, (http/1.1)}{localhost:34009}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp2004098342-123): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Parameter Sending Thread for localhost/127.0.0.1:36767): State: TIMED_WAITING Blocked count: 0 Waited count: 2042 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp2004098342-124): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-36cb5fd6-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1948be8e): State: TIMED_WAITING Blocked count: 0 Waited count: 899 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 131 (IPC Server idle connection scanner for port 41707): State: TIMED_WAITING Blocked count: 1 Waited count: 46 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 133 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 90 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 2 Waited count: 319 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@50752c30 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-2134272360-172.17.0.2-1731738273472 heartbeating to localhost/127.0.0.1:36767): State: TIMED_WAITING Blocked count: 1346 Waited count: 1479 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2f2fa19f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 132 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 129 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 140 (IPC Server handler 0 on default port 41707): State: TIMED_WAITING Blocked count: 0 Waited count: 453 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 1 on default port 41707): State: TIMED_WAITING Blocked count: 0 Waited count: 454 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 2 on default port 41707): State: TIMED_WAITING Blocked count: 0 Waited count: 450 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 3 on default port 41707): State: TIMED_WAITING Blocked count: 0 Waited count: 452 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 4 on default port 41707): State: TIMED_WAITING Blocked count: 0 Waited count: 450 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 156 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp945376875-157): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp945376875-158-acceptor-0@19c41b27-ServerConnector@198d5352{HTTP/1.1, (http/1.1)}{localhost:34783}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp945376875-159): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp945376875-160): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-61f32d00-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data3)): State: TIMED_WAITING Blocked count: 13 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data1)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data4)): State: TIMED_WAITING Blocked count: 13 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data2)): State: TIMED_WAITING Blocked count: 14 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 173 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data2/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data3/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data1/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data4/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 190 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 194 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1605ede6): State: TIMED_WAITING Blocked count: 0 Waited count: 898 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 199 (java.util.concurrent.ThreadPoolExecutor$Worker@47c76425[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (java.util.concurrent.ThreadPoolExecutor$Worker@754696bd[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 203 (IPC Server idle connection scanner for port 43747): State: TIMED_WAITING Blocked count: 1 Waited count: 46 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 205 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 90 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 
208 (Command processor): State: WAITING Blocked count: 1 Waited count: 299 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@45bca3c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 209 (BP-2134272360-172.17.0.2-1731738273472 heartbeating to localhost/127.0.0.1:36767): State: TIMED_WAITING Blocked count: 1322 Waited count: 1483 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@13f0d714): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 195 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 211 (IPC Server handler 0 on default port 43747): State: TIMED_WAITING Blocked count: 0 Waited count: 449 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 1 on default port 43747): State: TIMED_WAITING Blocked count: 0 Waited count: 449 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 2 on default port 43747): State: TIMED_WAITING Blocked count: 0 Waited count: 450 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (IPC Server handler 3 on default port 43747): State: TIMED_WAITING Blocked count: 0 Waited count: 449 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 216 (IPC Server handler 4 on default port 43747): State: TIMED_WAITING Blocked count: 0 Waited count: 450 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data6/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data5/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (java.util.concurrent.ThreadPoolExecutor$Worker@45b62b23[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited 
count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 5 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57949): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 225 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): 
State: WAITING Blocked count: 9 Waited count: 353 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aae9da6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:57949):): State: WAITING Blocked count: 2 Waited count: 426 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@68bcfe83 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 462 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@af5893e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 4 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@cef0a61 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 301 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:57949)): State: RUNNABLE Blocked count: 15 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 8 Waited count: 55 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@253260ab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 
(NIOWorkerThread-4): State: WAITING Blocked count: 6 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 16 Waited count: 69 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70e4bb67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 3 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 5 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 6 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 5 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39381): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@42c0b18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381): State: WAITING Blocked count: 56 Waited count: 268 Waiting on java.util.concurrent.Semaphore$NonfairSync@42eee269 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381): State: WAITING Blocked count: 165 Waited count: 621 Waiting on java.util.concurrent.Semaphore$NonfairSync@746fe5a7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39381): State: WAITING Blocked count: 53 Waited count: 6424 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@210547ce Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39381): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4515ec3d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39381): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4515ec3d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=39381): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@70dda35c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=39381): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7b14223d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=39381): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@70a421a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=39381): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@1aba4f8b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 291 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@760d8c3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 292 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 
(MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 332 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 112 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;3456ee6a3164:39381): State: TIMED_WAITING Blocked count: 12 Waited count: 2671 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1104/0x00007f93d0f8e7f8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) 
app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 353 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 45 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/3456ee6a3164:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (master/3456ee6a3164:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (org.apache.hadoop.hdfs.PeerCache@35a8fe16): State: TIMED_WAITING Blocked count: 0 Waited count: 149 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 378 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4453 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 395 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 101 Waited count: 4 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 396 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 77 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 146 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@456e0a56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 45 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 44478 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 23 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 21 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 453 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@519a4d41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 475 (regionserver/3456ee6a3164:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5efa3c3a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 476 (regionserver/3456ee6a3164:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6bf3a109 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 479 (regionserver/3456ee6a3164:0.procedureResultReporter): State: WAITING Blocked count: 9 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@592cb5f7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 523 (region-location-0): State: WAITING Blocked count: 11 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f0bd6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 44323 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 565 (ForkJoinPool.commonPool-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 631 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 570 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 582 (region-location-1): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f0bd6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 583 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f0bd6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 988 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 404 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1048 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 61 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f797580 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1503 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@3d5adc02 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2125 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2134 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2135 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2927 (region-location-3): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f0bd6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2928 (region-location-4): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f0bd6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5015 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5016 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5017 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8689 (AsyncFSWAL-1-hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/MasterData-prefix:3456ee6a3164,39381,1731738277818): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@37a1055d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8693 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 8694 (java.util.concurrent.ThreadPoolExecutor$Worker@4ba5d436[State = -1, empty queue]): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1cdb9eff Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-11-16T06:32:06,749 DEBUG 
[FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T06:32:36,749 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;3456ee6a3164:39381 222 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 14 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@569eb48e Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 16 Waited count: 20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 
26 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22ef4a98 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 28 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5139 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 52 Waiting on java.util.concurrent.CountDownLatch$Sync@5a0f840f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) 
java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 10438 Waited count: 10954 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@4bdee1c2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@13bdc9ab Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@696e100d): State: TIMED_WAITING Blocked count: 0 Waited count: 1023 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp274140085-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp274140085-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp274140085-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp274140085-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp274140085-41-acceptor-0@5291bd21-ServerConnector@58f01c16{HTTP/1.1, (http/1.1)}{localhost:40621}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp274140085-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp274140085-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp274140085-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-1e972b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 32 Waited count: 3123 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3ee6fbbf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 36767): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@27d6be6a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@54873d29): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING 
Blocked count: 0 Waited count: 171 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 50348 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1359 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31a0b22a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 36767): State: TIMED_WAITING Blocked count: 92 Waited count: 2417 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 36767): State: TIMED_WAITING Blocked count: 
112 Waited count: 2422 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 36767): State: TIMED_WAITING Blocked count: 88 Waited count: 2415 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 36767): State: TIMED_WAITING Blocked count: 96 Waited count: 2393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 36767): State: TIMED_WAITING Blocked count: 84 Waited count: 2404 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@f16ac0b): 
State: TIMED_WAITING Blocked count: 0 Waited count: 256 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@44c4ec06): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@6f1ce4b8): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@7db6dfd0): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(988407592)): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp535842688-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp535842688-87-acceptor-0@24b9a97b-ServerConnector@605d5872{HTTP/1.1, (http/1.1)}{localhost:44725}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp535842688-88): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp535842688-89): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-54ca9e06-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@661bf352): State: TIMED_WAITING Blocked count: 0 Waited count: 1020 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 38399): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 102 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 319 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3547e7b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-2134272360-172.17.0.2-1731738273472 heartbeating to localhost/127.0.0.1:36767): State: TIMED_WAITING Blocked count: 1376 Waited count: 1539 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@33039414): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 510 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 534 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 537 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 510 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 510 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 112 (IPC Client (692179358) connection to localhost/127.0.0.1:36767 from jenkins): State: TIMED_WAITING Blocked count: 1442 Waited count: 1443 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 121 (qtp2004098342-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp2004098342-122-acceptor-0@39b0bac7-ServerConnector@51a04cd6{HTTP/1.1, (http/1.1)}{localhost:34009}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp2004098342-123): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Parameter Sending Thread for localhost/127.0.0.1:36767): State: TIMED_WAITING Blocked count: 0 Waited count: 2102 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp2004098342-124): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-36cb5fd6-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1948be8e): State: TIMED_WAITING Blocked count: 0 Waited count: 1019 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 131 (IPC Server idle connection scanner for port 41707): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 133 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 102 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 2 Waited count: 339 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@50752c30 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-2134272360-172.17.0.2-1731738273472 heartbeating to localhost/127.0.0.1:36767): State: TIMED_WAITING Blocked count: 1366 Waited count: 1519 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2f2fa19f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 132 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 129 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 140 (IPC Server handler 0 on default port 41707): State: TIMED_WAITING Blocked count: 0 Waited count: 513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 1 on default port 41707): State: TIMED_WAITING Blocked count: 0 Waited count: 514 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 2 on default port 41707): State: TIMED_WAITING Blocked count: 0 Waited count: 510 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 3 on default port 41707): State: TIMED_WAITING Blocked count: 0 Waited count: 512 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 4 on default port 41707): State: TIMED_WAITING Blocked count: 0 Waited count: 510 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 156 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp945376875-157): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp945376875-158-acceptor-0@19c41b27-ServerConnector@198d5352{HTTP/1.1, (http/1.1)}{localhost:34783}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp945376875-159): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp945376875-160): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-61f32d00-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data3)): State: TIMED_WAITING Blocked count: 13 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data1)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data4)): State: 
TIMED_WAITING Blocked count: 13 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data2)): State: TIMED_WAITING Blocked count: 14 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 173 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data2/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data3/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data1/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data4/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 190 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 194 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1605ede6): State: TIMED_WAITING Blocked count: 0 Waited count: 1018 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 199 (java.util.concurrent.ThreadPoolExecutor$Worker@47c76425[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (java.util.concurrent.ThreadPoolExecutor$Worker@754696bd[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 203 (IPC Server idle connection scanner for port 43747): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 205 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 102 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (Command processor): State: WAITING Blocked count: 1 Waited count: 319 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@45bca3c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 209 (BP-2134272360-172.17.0.2-1731738273472 heartbeating to localhost/127.0.0.1:36767): State: TIMED_WAITING Blocked count: 1342 Waited count: 1523 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@13f0d714): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 195 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 211 (IPC Server handler 0 on default port 43747): State: TIMED_WAITING Blocked count: 0 Waited count: 509 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 1 on default port 43747): State: TIMED_WAITING Blocked count: 0 Waited count: 509 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 2 on default port 43747): State: TIMED_WAITING Blocked count: 0 Waited count: 510 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (IPC Server handler 3 on default port 43747): State: TIMED_WAITING Blocked count: 0 Waited count: 509 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 216 (IPC Server handler 4 on default port 43747): State: TIMED_WAITING Blocked count: 0 Waited count: 511 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 218 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data6/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data5/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (java.util.concurrent.ThreadPoolExecutor$Worker@45b62b23[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 5 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57949): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 255 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 358 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aae9da6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:57949):): State: WAITING Blocked count: 2 Waited count: 431 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@68bcfe83 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 467 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@af5893e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 4 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@cef0a61 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 329 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:57949)): State: RUNNABLE Blocked count: 15 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 8 Waited count: 55 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@253260ab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 6 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 16 Waited count: 69 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70e4bb67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 3 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 5 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 84 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 6 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 
(NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 5 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39381): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@42c0b18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381): State: WAITING Blocked count: 56 Waited count: 268 Waiting on java.util.concurrent.Semaphore$NonfairSync@42eee269 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381): State: WAITING Blocked count: 165 Waited count: 621 Waiting on java.util.concurrent.Semaphore$NonfairSync@746fe5a7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39381): State: WAITING Blocked count: 53 Waited count: 6424 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@210547ce Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39381): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4515ec3d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 
(RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39381): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4515ec3d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=39381): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@70dda35c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=39381): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7b14223d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=39381): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@70a421a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 
(RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=39381): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@1aba4f8b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 291 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@760d8c3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 292 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 332 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 112 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;3456ee6a3164:39381): State: TIMED_WAITING Blocked count: 12 Waited count: 2671 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1104/0x00007f93d0f8e7f8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 353 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 51 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/3456ee6a3164:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (master/3456ee6a3164:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (org.apache.hadoop.hdfs.PeerCache@35a8fe16): State: TIMED_WAITING Blocked count: 0 Waited count: 169 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 378 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5052 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 395 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 101 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 396 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 77 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 146 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@456e0a56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 51 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 50480 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 23 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 21 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 453 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@519a4d41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 475 (regionserver/3456ee6a3164:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5efa3c3a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 476 (regionserver/3456ee6a3164:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6bf3a109 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 479 (regionserver/3456ee6a3164:0.procedureResultReporter): State: WAITING Blocked count: 9 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@592cb5f7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 523 (region-location-0): State: WAITING Blocked count: 11 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f0bd6 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 50325 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 570 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 582 (region-location-1): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f0bd6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 583 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f0bd6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 988 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 410 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1048 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 61 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f797580 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1503 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@3d5adc02 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2125 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2134 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2135 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2927 (region-location-3): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f0bd6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2928 (region-location-4): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f0bd6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5015 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5016 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5017 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8689 (AsyncFSWAL-1-hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/MasterData-prefix:3456ee6a3164,39381,1731738277818): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@37a1055d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8693 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 8694 (java.util.concurrent.ThreadPoolExecutor$Worker@4ba5d436[State = -1, empty queue]): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1cdb9eff Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-11-16T06:33:06,750 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T06:33:36,750 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-16T06:34:05,028 DEBUG [M:0;3456ee6a3164:39381 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731738545014Disabling compacts and flushes for region at 1731738545014Disabling writes for close at 1731738545026 (+12 ms)Obtaining lock to block concurrent updates at 1731738545026Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731738545026Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=849334, getHeapSize=1018080, getOffHeapSize=0, getCellsCount=2221 at 1731738545027 (+1 ms)Failed flush master:store,,1.1595e783b53d99cd5eef43b6debb2682., putting online again at 1731738845028 (+300001 ms) 2024-11-16T06:34:05,029 WARN [M:0;3456ee6a3164:39381 {}] region.MasterRegion(134): Failed to close region org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3824, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1033) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3824, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) ~[classes/:?] ... 
19 more 2024-11-16T06:34:05,033 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:391) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:247) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:203) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:240) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:162) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T06:34:05,038 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-16T06:34:05,039 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-16T06:34:05,039 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/MasterData/WALs/3456ee6a3164,39381,1731738277818/3456ee6a3164%2C39381%2C1731738277818.1731738279496 2024-11-16T06:34:05,042 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/MasterData/WALs/3456ee6a3164,39381,1731738277818/3456ee6a3164%2C39381%2C1731738277818.1731738279496 after 1ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T06:34:05,043 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.InterruptedIOException: Operation cancelled at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T06:34:05,043 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/MasterData/WALs/3456ee6a3164,39381,1731738277818/3456ee6a3164%2C39381%2C1731738277818.1731738279496 2024-11-16T06:34:05,043 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/MasterData/WALs/3456ee6a3164,39381,1731738277818/3456ee6a3164%2C39381%2C1731738277818.1731738279496 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;3456ee6a3164:39381 225 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING 
Blocked count: 14 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@569eb48e Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 16 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22ef4a98 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 31 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5739 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 58 Waiting on java.util.concurrent.CountDownLatch$Sync@7a1345e8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 10438 Waited count: 10955 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) 
java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@4bdee1c2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@13bdc9ab Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@696e100d): State: TIMED_WAITING Blocked count: 0 Waited count: 1143 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp274140085-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp274140085-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp274140085-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp274140085-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp274140085-41-acceptor-0@5291bd21-ServerConnector@58f01c16{HTTP/1.1, (http/1.1)}{localhost:40621}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp274140085-42): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 
43 (qtp274140085-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp274140085-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-1e972b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 32 Waited count: 3123 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3ee6fbbf 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 36767): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@27d6be6a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 193 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@54873d29): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 192 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 56273 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1359 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31a0b22a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC 
Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 36767): State: TIMED_WAITING Blocked count: 92 Waited count: 2477 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 36767): State: TIMED_WAITING Blocked count: 112 Waited count: 2482 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 36767): State: TIMED_WAITING Blocked count: 88 Waited count: 2475 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 36767): State: TIMED_WAITING Blocked count: 96 Waited count: 2453 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 36767): State: TIMED_WAITING Blocked count: 84 Waited count: 2464 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@f16ac0b): State: TIMED_WAITING Blocked count: 0 Waited count: 286 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@44c4ec06): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@6f1ce4b8): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@7db6dfd0): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(988407592)): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp535842688-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp535842688-87-acceptor-0@24b9a97b-ServerConnector@605d5872{HTTP/1.1, (http/1.1)}{localhost:44725}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp535842688-88): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 
(qtp535842688-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-54ca9e06-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@661bf352): State: TIMED_WAITING Blocked count: 0 Waited count: 1140 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner 
for port 38399): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 114 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 339 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3547e7b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-2134272360-172.17.0.2-1731738273472 heartbeating to localhost/127.0.0.1:36767): State: TIMED_WAITING Blocked count: 1396 Waited count: 1579 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@33039414): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 570 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 608 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 602 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 570 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 570 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 112 (IPC Client (692179358) connection to localhost/127.0.0.1:36767 from jenkins): State: TIMED_WAITING Blocked count: 1502 Waited count: 1503 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 121 (qtp2004098342-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp2004098342-122-acceptor-0@39b0bac7-ServerConnector@51a04cd6{HTTP/1.1, (http/1.1)}{localhost:34009}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp2004098342-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Parameter Sending Thread for localhost/127.0.0.1:36767): State: TIMED_WAITING Blocked count: 0 Waited count: 2162 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp2004098342-124): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-36cb5fd6-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1948be8e): State: TIMED_WAITING Blocked count: 0 Waited count: 1139 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 131 (IPC Server idle connection scanner for port 41707): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) 
java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 133 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 114 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 2 Waited count: 359 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@50752c30 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-2134272360-172.17.0.2-1731738273472 heartbeating to localhost/127.0.0.1:36767): State: TIMED_WAITING Blocked count: 1386 Waited count: 1559 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2f2fa19f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 132 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 129 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 140 (IPC Server handler 0 on default port 41707): State: TIMED_WAITING Blocked count: 0 Waited count: 573 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 1 on default port 41707): State: TIMED_WAITING Blocked count: 0 Waited count: 574 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 2 on default port 41707): State: TIMED_WAITING Blocked count: 0 Waited count: 570 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 3 on default port 41707): State: TIMED_WAITING Blocked count: 0 Waited count: 572 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 4 on default port 41707): State: TIMED_WAITING Blocked count: 0 Waited count: 570 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 156 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp945376875-157): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f93d042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp945376875-158-acceptor-0@19c41b27-ServerConnector@198d5352{HTTP/1.1, (http/1.1)}{localhost:34783}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp945376875-159): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp945376875-160): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-61f32d00-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data3)): State: TIMED_WAITING Blocked count: 13 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data1)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data4)): State: TIMED_WAITING Blocked count: 13 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data2)): State: TIMED_WAITING Blocked count: 14 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 173 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data2/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data3/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data1/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data4/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 190 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 194 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1605ede6): State: TIMED_WAITING Blocked count: 0 Waited count: 1138 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 199 (java.util.concurrent.ThreadPoolExecutor$Worker@47c76425[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (java.util.concurrent.ThreadPoolExecutor$Worker@754696bd[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 203 (IPC Server idle connection scanner for port 43747): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 205 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 114 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (Command processor): State: WAITING Blocked count: 1 Waited count: 339 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@45bca3c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 209 (BP-2134272360-172.17.0.2-1731738273472 heartbeating to localhost/127.0.0.1:36767): State: TIMED_WAITING Blocked count: 1362 Waited count: 1563 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@13f0d714): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 195 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 211 (IPC Server handler 0 on default port 43747): State: TIMED_WAITING Blocked count: 0 Waited count: 569 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 1 on default port 43747): State: TIMED_WAITING Blocked count: 0 Waited count: 569 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 2 on default port 43747): State: TIMED_WAITING Blocked count: 0 Waited count: 570 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (IPC Server handler 3 on default port 43747): State: TIMED_WAITING Blocked count: 
0 Waited count: 580 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 216 (IPC Server handler 4 on default port 43747): State: TIMED_WAITING Blocked count: 0 Waited count: 576 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data6/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data5/current/BP-2134272360-172.17.0.2-1731738273472): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (java.util.concurrent.ThreadPoolExecutor$Worker@45b62b23[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 5 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57949): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 285 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 362 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aae9da6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:57949):): State: WAITING Blocked count: 2 Waited count: 435 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@68bcfe83 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 471 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@af5893e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 4 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@cef0a61 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 357 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:57949)): State: RUNNABLE Blocked count: 15 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 8 Waited count: 55 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@253260ab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 6 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 16 Waited count: 69 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70e4bb67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 3 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 5 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 6 Waited count: 85 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 5 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42e2ec36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 
(RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39381): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@42c0b18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39381): State: WAITING Blocked count: 56 Waited count: 268 Waiting on java.util.concurrent.Semaphore$NonfairSync@42eee269 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381): State: WAITING Blocked count: 165 Waited count: 621 Waiting on java.util.concurrent.Semaphore$NonfairSync@746fe5a7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39381): State: WAITING Blocked count: 53 Waited count: 6424 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@210547ce Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 
(RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39381): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4515ec3d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39381): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4515ec3d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=39381): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@70dda35c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=39381): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7b14223d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=39381): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@70a421a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=39381): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@1aba4f8b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 291 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@760d8c3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 292 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 332 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 112 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;3456ee6a3164:39381): State: TIMED_WAITING Blocked count: 12 Waited count: 2672 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1195) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown0(AbstractFSWALProvider.java:162) app//org.apache.hadoop.hbase.wal.AbstractWALProvider$$Lambda$1345/0x00007f93d1209a00.run(Unknown Source) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.shutdown(AbstractWALProvider.java:299) 
app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:341) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 57 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/3456ee6a3164:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (master/3456ee6a3164:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (org.apache.hadoop.hdfs.PeerCache@35a8fe16): State: TIMED_WAITING Blocked count: 0 Waited count: 189 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 378 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5652 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 395 
(MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 101 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 396 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 77 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 146 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@456e0a56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 57 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 56482 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 23 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 21 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 453 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@519a4d41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 475 (regionserver/3456ee6a3164:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5efa3c3a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 476 (regionserver/3456ee6a3164:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6bf3a109 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 479 (regionserver/3456ee6a3164:0.procedureResultReporter): State: WAITING Blocked count: 9 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@592cb5f7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 523 (region-location-0): State: WAITING Blocked count: 11 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f0bd6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 56327 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 570 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 582 (region-location-1): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f0bd6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 583 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f0bd6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 988 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 416 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1048 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 61 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f797580 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1503 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@3d5adc02 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2125 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2134 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2135 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2927 (region-location-3): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f0bd6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2928 (region-location-4): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f0bd6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 
5015 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5016 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5017 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8689 (AsyncFSWAL-1-hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/MasterData-prefix:3456ee6a3164,39381,1731738277818): State: WAITING 
Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@37a1055d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8693 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 8694 (java.util.concurrent.ThreadPoolExecutor$Worker@4ba5d436[State = -1, empty queue]): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1cdb9eff Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8695 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8698 (WAL-Shutdown-0): State: 
TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doShutdown(AbstractFSWAL.java:2117) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1179) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1174) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8699 (Close-WAL-Writer-0): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1338/0x00007f93d11fe390.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-11-16T06:34:06,751 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T06:34:09,044 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/MasterData/WALs/3456ee6a3164,39381,1731738277818/3456ee6a3164%2C39381%2C1731738277818.1731738279496 after 4001ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T06:34:10,033 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds" 2024-11-16T06:34:10,034 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T06:34:10,034 INFO [M:0;3456ee6a3164:39381 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T06:34:10,035 INFO [M:0;3456ee6a3164:39381 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39381 2024-11-16T06:34:10,036 INFO [M:0;3456ee6a3164:39381 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T06:34:10,048 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36767/user/jenkins/test-data/87ceb2e6-263d-83e2-861e-b50b339612b7/MasterData/WALs/3456ee6a3164,39381,1731738277818/3456ee6a3164%2C39381%2C1731738277818.1731738279496 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 12 more 2024-11-16T06:34:10,138 INFO [M:0;3456ee6a3164:39381 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T06:34:10,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T06:34:10,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x100473470b00000, quorum=127.0.0.1:57949, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T06:34:10,145 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5cfd34d2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T06:34:10,146 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@198d5352{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T06:34:10,146 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T06:34:10,147 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5b54b674{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T06:34:10,147 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27b64e3f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop.log.dir/,STOPPED} 2024-11-16T06:34:10,149 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T06:34:10,149 WARN [BP-2134272360-172.17.0.2-1731738273472 heartbeating to localhost/127.0.0.1:36767 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T06:34:10,149 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T06:34:10,149 WARN [BP-2134272360-172.17.0.2-1731738273472 heartbeating to localhost/127.0.0.1:36767 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2134272360-172.17.0.2-1731738273472 (Datanode Uuid 5d296949-a934-44b1-8205-e41aa36a0acd) service to localhost/127.0.0.1:36767 2024-11-16T06:34:10,151 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data5/current/BP-2134272360-172.17.0.2-1731738273472 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T06:34:10,151 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data6/current/BP-2134272360-172.17.0.2-1731738273472 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T06:34:10,152 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T06:34:10,154 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5c1dd7bf{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T06:34:10,155 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@51a04cd6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T06:34:10,155 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T06:34:10,155 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4109d9bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T06:34:10,155 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1800a749{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop.log.dir/,STOPPED} 2024-11-16T06:34:10,157 WARN [BP-2134272360-172.17.0.2-1731738273472 heartbeating to localhost/127.0.0.1:36767 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T06:34:10,157 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T06:34:10,157 WARN [BP-2134272360-172.17.0.2-1731738273472 heartbeating to localhost/127.0.0.1:36767 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2134272360-172.17.0.2-1731738273472 (Datanode Uuid a232a18a-a31a-4c4d-92dc-644c05f14d99) service to localhost/127.0.0.1:36767 2024-11-16T06:34:10,157 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T06:34:10,157 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data3/current/BP-2134272360-172.17.0.2-1731738273472 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T06:34:10,158 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data4/current/BP-2134272360-172.17.0.2-1731738273472 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T06:34:10,158 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T06:34:10,160 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@e8ba092{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T06:34:10,161 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@605d5872{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T06:34:10,161 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T06:34:10,161 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@516ed17d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T06:34:10,161 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37223f11{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop.log.dir/,STOPPED} 2024-11-16T06:34:10,162 WARN [BP-2134272360-172.17.0.2-1731738273472 heartbeating to localhost/127.0.0.1:36767 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T06:34:10,162 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T06:34:10,162 WARN [BP-2134272360-172.17.0.2-1731738273472 heartbeating to localhost/127.0.0.1:36767 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2134272360-172.17.0.2-1731738273472 (Datanode Uuid 45c81a13-af13-4abd-be13-e699e7afb52a) service to localhost/127.0.0.1:36767 2024-11-16T06:34:10,162 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T06:34:10,163 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data1/current/BP-2134272360-172.17.0.2-1731738273472 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T06:34:10,163 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/cluster_656590c2-bf56-f6bd-fc4a-567b272c49ae/data/data2/current/BP-2134272360-172.17.0.2-1731738273472 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T06:34:10,164 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T06:34:10,171 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5d3f6b4f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T06:34:10,171 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@58f01c16{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T06:34:10,171 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T06:34:10,172 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5cebb95a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T06:34:10,172 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5140b357{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/5d5631fe-b692-553e-c0db-c4d63dbcb0b1/hadoop.log.dir/,STOPPED} 2024-11-16T06:34:10,184 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T06:34:10,262 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down